diff --git a/COPYRIGHT b/COPYRIGHT new file mode 100644 index 0000000..0b3f7c7 --- /dev/null +++ b/COPYRIGHT @@ -0,0 +1,3 @@ + +See LICENSE file. + diff --git a/CREDITS b/CREDITS new file mode 100644 index 0000000..64bb736 --- /dev/null +++ b/CREDITS @@ -0,0 +1,88 @@ + +The following is a list of people who have contributed to the +development of mod_python. + +Anyone who has contributed code or otherwise made contributions that were +constructive to the development of the project may have his name listed +here. Note that the decision on whether or not a name goes on this list is +initially taken by me (grisha@modpython.org) and that I can be wrong. So +if you feel that you should be credited as well, or if you feel that you +should not be listed, please e-mail me. + +The names are listed alphabetically by last name. + +Ron Alford + +Richard Barrett + +Gary Benson + +Stéphane Bidoul + +Gregory Bond + +Graham Dumpleton + +Justin Erenkrantz + +David Fraser + +Jim Gallacher + +Damjan Georgievski + +Thomas Geraghty + +James Gessling + +Bob Ippolito + +Indrek Järve + +Mads Kiilerich + +Jørgen Frøjk Kjærsgaard + +Nicolas Lehuen + +Miguel Marques + +Thom May + +Robin Munn + +Brendan O'Connor + +Barry Pearce + +Barry Pederson + +Sean Reifschneider + +Conrad Steenberg + +Sean Treadway + +Chris Trengove + +Jarkko Torppa + +Ville Skyttä + +Greg Stein + +Dr. L.A. Timochouk + +Gregory Trubetskoy + +Sean True + +Sebastian Tusk + +Enrique Vaamonde + +Dave Wallace + +Aaron Watters + + diff --git a/Doc/Makefile.in b/Doc/Makefile.in new file mode 100644 index 0000000..705fe43 --- /dev/null +++ b/Doc/Makefile.in @@ -0,0 +1,174 @@ + +# +# Makefile for Python documentation +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# + +# You can set these variables from the command line. 
+PYTHON = @PYTHON_BIN@ +SVNROOT = http://svn.python.org/projects +SPHINXOPTS = +PAPER = +SOURCES = +DISTVERSION = $(shell ../dist/version.sh) + +ALLSPHINXOPTS = -b $(BUILDER) -d build/doctrees -D latex_paper_size=$(PAPER) \ + $(SPHINXOPTS) . build/$(BUILDER) $(SOURCES) + +.PHONY: help checkout update build html htmlhelp latex text changes linkcheck \ + suspicious coverage htmlview clean dist check serve \ + autobuild-dev autobuild-stable + +help: + @echo "Please use \`make ' where is one of" + @echo " clean to remove build files" + @echo " update to update build tools" + @echo " html to make standalone HTML files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter (NB: you'll need to backslash escape the _ in Mod_python in the .tex file)" + @echo " text to make plain text files" + @echo " changes to make an overview over all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " coverage to check documentation coverage for library and C API" + @echo " dist to create a \"dist\" directory with archived docs for download" + @echo " suspicious to check for suspicious markup in output text" + @echo " check to run a check for frequent markup errors" + @echo " serve to serve the documentation on the localhost (8000)" + +# Note: if you update versions here, do the same in make.bat and README.txt +checkout: + @if [ ! -d tools/sphinx ]; then \ + echo "Checking out Sphinx..."; \ + svn checkout $(SVNROOT)/external/Sphinx-1.0.7/sphinx tools/sphinx; \ + fi + @if [ ! -d tools/docutils ]; then \ + echo "Checking out Docutils..."; \ + svn checkout $(SVNROOT)/external/docutils-0.6/docutils tools/docutils; \ + fi + @if [ ! -d tools/jinja2 ]; then \ + echo "Checking out Jinja..."; \ + svn checkout $(SVNROOT)/external/Jinja-2.3.1/jinja2 tools/jinja2; \ + fi + @if [ ! 
-d tools/pygments ]; then \ + echo "Checking out Pygments..."; \ + svn checkout $(SVNROOT)/external/Pygments-1.3.1/pygments tools/pygments; \ + fi + +update: clean checkout + +build: checkout + mkdir -p build/$(BUILDER) build/doctrees + $(PYTHON) tools/sphinx-build.py $(ALLSPHINXOPTS) + @echo + +html: BUILDER = html +html: build + @echo "Build finished. The HTML pages are in build/html." + +htmlhelp: BUILDER = htmlhelp +htmlhelp: build + @echo "Build finished; now you can run HTML Help Workshop with the" \ + "build/htmlhelp/pydoc.hhp project file." + +latex: BUILDER = latex +latex: build + @echo "Build finished; the LaTeX files are in build/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +text: BUILDER = text +text: build + @echo "Build finished; the text files are in build/text." + +changes: BUILDER = changes +changes: build + @echo "The overview file is in build/changes." + +linkcheck: BUILDER = linkcheck +linkcheck: build + @echo "Link check complete; look for any errors in the above output" \ + "or in build/$(BUILDER)/output.txt" + +suspicious: BUILDER = suspicious +suspicious: build + @echo "Suspicious check complete; look for any errors in the above output" \ + "or in build/$(BUILDER)/suspicious.csv. If all issues are false" \ + "positives, append that file to tools/sphinxext/susp-ignored.csv." 
+ +coverage: BUILDER = coverage +coverage: build + @echo "Coverage finished; see c.txt and python.txt in build/coverage" + +htmlview: html + $(PYTHON) -c "import webbrowser; webbrowser.open('build/html/index.html')" + +clean: + -rm -rf build/* + -rm -rf tools/sphinx + -rm -rf tools/pygments + -rm -rf tools/jinja2 + -rm -rf tools/docutils + +dist: + rm -rf dist + mkdir -p dist + + # archive the HTML + make html + cp -pPR build/html dist/mod_python-$(DISTVERSION)-docs-html + tar -C dist -cf dist/mod_python-$(DISTVERSION)-docs-html.tar mod_python-$(DISTVERSION)-docs-html + bzip2 -9 -k dist/mod_python-$(DISTVERSION)-docs-html.tar + (cd dist; zip -q -r -9 mod_python-$(DISTVERSION)-docs-html.zip mod_python-$(DISTVERSION)-docs-html) + rm -r dist/mod_python-$(DISTVERSION)-docs-html + rm dist/mod_python-$(DISTVERSION)-docs-html.tar + + # archive the text build + make text + cp -pPR build/text dist/mod_python-$(DISTVERSION)-docs-text + tar -C dist -cf dist/mod_python-$(DISTVERSION)-docs-text.tar mod_python-$(DISTVERSION)-docs-text + bzip2 -9 -k dist/mod_python-$(DISTVERSION)-docs-text.tar + (cd dist; zip -q -r -9 mod_python-$(DISTVERSION)-docs-text.zip mod_python-$(DISTVERSION)-docs-text) + rm -r dist/mod_python-$(DISTVERSION)-docs-text + rm dist/mod_python-$(DISTVERSION)-docs-text.tar + + # archive the A4 latex + rm -rf build/latex + make latex PAPER=a4 + -sed -i 's/makeindex/makeindex -q/' build/latex/Makefile + (cd build/latex; make clean && make all-pdf && make FMT=pdf zip bz2) + cp build/latex/docs-pdf.zip dist/mod_python-$(DISTVERSION)-docs-pdf-a4.zip + cp build/latex/docs-pdf.tar.bz2 dist/mod_python-$(DISTVERSION)-docs-pdf-a4.tar.bz2 + + # archive the letter latex + rm -rf build/latex + make latex PAPER=letter + -sed -i 's/makeindex/makeindex -q/' build/latex/Makefile + (cd build/latex; make clean && make all-pdf && make FMT=pdf zip bz2) + cp build/latex/docs-pdf.zip dist/mod_python-$(DISTVERSION)-docs-pdf-letter.zip + cp build/latex/docs-pdf.tar.bz2 
dist/mod_python-$(DISTVERSION)-docs-pdf-letter.tar.bz2 + +check: + $(PYTHON) tools/rstlint.py -i tools + +serve: + ./tools/serve.py build/html + +# Targets for daily automated doc build + +# for development releases: always build +autobuild-dev: + make update + make dist SPHINXOPTS='-A daily=1 -A versionswitcher=1' + +# for quick rebuilds (HTML only) +autobuild-html: + make html SPHINXOPTS='-A daily=1 -A versionswitcher=1' + +# for stable releases: only build if not in pre-release stage (alpha, beta, rc) +autobuild-stable: + @case $(DISTVERSION) in *[abc]*) \ + echo "Not building; $(DISTVERSION) is not a release version."; \ + exit 1;; \ + esac + @make autobuild-dev + diff --git a/Doc/about.rst b/Doc/about.rst new file mode 100644 index 0000000..e559569 --- /dev/null +++ b/Doc/about.rst @@ -0,0 +1,12 @@ +===================== +About these documents +===================== + + +These documents are generated from `reStructuredText`_ sources by `Sphinx`_, a +document processor specifically written for the Python documentation. + +.. _reStructuredText: http://docutils.sf.net/rst.html +.. _Sphinx: http://sphinx.pocoo.org/ + + diff --git a/Doc/changes.rst b/Doc/changes.rst new file mode 100644 index 0000000..8b20d37 --- /dev/null +++ b/Doc/changes.rst @@ -0,0 +1,271 @@ + +.. _changes: + +******* +Changes +******* + +.. _changes_from_3_3_1: + +Changes from version 3.3.1 +========================== + +New Features +------------ + +* Create the mod_python command-line tool to report version, manage Apache configuration and instances. +* Make httpdconf directives render themselves as Python, add the only_if conditional and comments. +* Expose and document httpdconf, make mod_python importable outside of Apache. +* Provide a WSGI handler. +* Change the Copyright to reflect the new status. +* Add support for Apache HTTP Server 2.4. +* Add support for Python 2.7. + +Improvements +------------ + +* Improve WSGI and Python path documentation. 
+* Change WSGI handler to use Location path as SCRIPT_NAME. +* Add is_location to hlist object, skip the map_to_storage for Location-wrapped Python*Handlers. +* Some optimizations to Python code to make it run faster. +* Add Mutex to Apache 2.4 tests. +* Provide an internal add_cgi_vars() implementation which does not use sub-requests. +* Many documentation clarifications and improvements. +* Add a test to ensure that req.write() and req.flush() do not leak memory (2.4 only). +* Many new tests and test framework improvements. +* Added a curl hint to the tests for easier staging/debugging. +* Get rid of the ancient memberlist and PyMember_Get/Set calls. +* Add support for the c.remote_ip/addr to c.client_ip/addr change in 2.4. Add req.useragent_addr (also new in 2.4). +* Always check C version against Py version and warn. +* Remove APLOG_NOERRNO references. +* A more unified and cleaned up method of keeping version information. +* Convert documentation to the new reStructuredText format. +* Revert to using the old importer from 3.2. +* Replace README with README.md +* (`MODPYTHON-238 `_) Make req.chunked and req.connection.keepalive writable. Being able to set these allows chunking to be turned off when HTTP/1.1 is used but no content length supplied in response. +* (`MODPYTHON-226 `_) Make req.status_line writable. + +Bug Fixes +--------- + +* Make PythonCleanupHandler run again. +* Use PyCapsule API instead of PyCObject for Python 2.7+. +* Fix SCRIPT_NAME and PATH_INFO inconsistencies so that the WSGI handler behaves correctly. +* Remove with-python-src configure option as it is no longer used to build the docs. +* (`MODPYTHON-243 `_) Fixed format string error. +* (`MODPYTHON-250 `_) Fixed MacOS X (10.5) Leopard 64 bit architecture problems. +* (`MODPYTHON-249 `_) Fixed incorrect use of APR bucket brigades shown up by APR 1.3.2. +* (`MODPYTHON-245 `_) Fix prototype of optional exported function mp_release_interpreter(). 
+* (`MODPYTHON-220 `_) Fix 'import' from same directory as PSP file. + +.. _changes_from_3_2_10: + +Changes from version 3.2.10 +=========================== + +New Features +------------ + +* (`MODPYTHON-103 `_) New req.add_output_filter(), req.add_input_filter(), req.register_output_fiter(), req.register_input_filter() methods. These allows the dynamic registration of filters and the attaching of filters to the current request. +* (`MODPYTHON-104 `_) Support added for using Python in content being passed through "INCLUDES" output filter, or as more commonly referred to server side include (SSI) mechanism. +* (`MODPYTHON-108 `_) Added support to cookies for httponly attribute, an extension originally created by Microsoft, but now getting more widespread use in the battle against cross site-scripting attacks. +* (`MODPYTHON-118 `_) Now possible using the PythonImport directive to specify the name of a function contained in the module to be called once the designated module has been imported. +* (`MODPYTHON-124 `_) New req.auth_name() and req.auth_type() methods. These return the values associated with the AuthName and AuthType directives respectively. The req.ap_auth_type has now also been made writable so that it can be set by an authentication handler. +* (`MODPYTHON-130 `_) Added req.set_etag(), req.set_last_modified() and req.update_mtime() functions as wrappers for similar functions provided by Apache C API. These are required to effectively use the req.meets_condition() function. The documentation for req.meets_condition() has also been updated as what it previously described probably wouldn't actually work. +* (`MODPYTHON-132 `_) New req.construct_url() method. Used to construct a fully qualified URI string incorporating correct scheme, server and port. +* (`MODPYTHON-144 `_) The "apache.interpreter" and "apache.main_server" attributes have been made publically available. These were previously private and not part of the public API. 
+* (`MODPYTHON-149 `_) Added support for session objects that span domains. +* (`MODPYTHON-153 `_) Added req.discard_request_body() function as wrapper for similar function provided by Apache C API. The function tests for and reads any message body in the request, simply discarding whatever it receives. +* (`MODPYTHON-164 `_) The req.add_handler(), req.register_input_filter() and req.register_output_filter() methods can now take a direct reference to a callable object as well a string which refers to a module or module::function combination by name. +* (`MODPYTHON-165 `_) Exported functions from mod_python module to be used in other third party modules for Apache. The purpose of these functions is to allow those other modules to access the mechanics of how mod_python creates interpreters, thereby allowing other modules to also embed Python and for there not to be a conflict with mod_python. +* (`MODPYTHON-170 `_) Added req._request_rec, server._server_rec and conn._conn_rec semi private members for getting accessing to underlying Apache struct as a Python CObject. These can be used for use in implementing SWIG bindings for lower level APIs of Apache. These members should be regarded as experimental and there are no guarantees that they will remain present in this specific form in the future. +* (`MODPYTHON-193 `_) Added new attribute available as req.hlist.location. For a handler executed directly as the result of a handler directive within a Location directive, this will be set to the value of the Location directive. If LocationMatch, or wildcards or regular expressions are used with Location, the value will be the matched value in the URL and not the pattern. + +Improvements +------------ + +* (`MODPYTHON-27 `_) When using mod_python.publisher, the __auth__() and __access__() functions and the __auth_realm__ string can now be nested within a class method as a well a normal function. 
+* (`MODPYTHON-90 `_) The PythonEnablePdb configuration option will now be ignored if Apache hasn't been started up in single process mode. +* (`MODPYTHON-91 `_) If running Apache in single process mode with PDB enabled and the "quit" command is used to exit that debug session, an exception indicating that the PDB session has been aborted is raised rather than None being returned with a subsequent error complaining about the handler returning an invalid value. +* (`MODPYTHON-93 `_) Improved util.FieldStorage efficiency and made the interface more dictionary like. +* (`MODPYTHON-101 `_) Force an exception when handler evaluates to something other than None but is otherwise not callable. Previously an exception would not be generated if the handler evaluated to False. +* (`MODPYTHON-107 `_) Neither mod_python.publisher nor mod_python.psp explicitly flush output after writing the content of the response back to the request object. By not flushing output it is now possible to use the "CONTENT_LENGTH" output filter to add a "Content-Length" header. +* (`MODPYTHON-111 `_) Note made in session documentation that a save is required to avoid session timeouts. +* (`MODPYTHON-125 `_) The req.handler attribute is now writable. This allows a handler executing in a phase prior to the response phase to specify which Apache module will be responsible for generating the content. +* (`MODPYTHON-128 `_) Made the req.canonical_filename attribute writable. Changed the req.finfo attribute from being a tuple to an actual object. For backwards compatibility the attributes of the object can still be accessed as if they were a tuple. New code however should access the attributes as member data. The req.finfo attribute is also now writable and can be assigned to using the result of calling the new function apache.stat(). This function is a wrapper for apr_stat(). 
+* (`MODPYTHON-129 `_) When specifying multiple handlers for a phase, the status returned by each handler is now treated the same as how Apache would treat the status if the handler was registered using the low level C API. What this means is that whereas stacked handlers of any phase would in turn previously be executed as long as they returned apache.OK, this is no longer the case and what happens is dependent on the phase. Specifically, a handler returning apache.DECLINED no longer causes the execution of subsequent handlers for the phase to be skipped. Instead, it will move to the next of the stacked handlers. In the case of PythonTransHandler, PythonAuthenHandler, PythonAuthzHandler and PythonTypeHandler, as soon as apache.OK is returned, subsequent handlers for the phase will be skipped, as the result indicates that any processing pertinent to that phase has been completed. For other phases, stacked handlers will continue to be executed if apache.OK is returned as well as when apache.DECLINED is returned. This new interpretation of the status returned also applies to stacked content handlers listed against the PythonHandler directive even though Apache notionally only ever calls at most one content handler. Where all stacked content handlers in that phase run, the status returned from the last handler becomes the overall status from the content phase. +* (`MODPYTHON-141 `_) The req.proxyreq and req.uri attributes are now writable. This allows a handler to setup these values and trigger proxying of the current request to a remote server. +* (`MODPYTHON-142 `_) The req.no_cache and req.no_local_copy attributes are now writable. +* (`MODPYTHON-143 `_) Completely reimplemented the module importer. This is now used whenever modules are imported corresponding to any of the Python*Handler, Python*Filter and PythonImport directives. The module importer is still able to be used directly using the apache.import_module() function. 
The new module importer no longer supports automatic reloading of packages/modules that appear on the standard Python module search path as defined by the PythonPath directive or within an application by direct changes to sys.path. Automatic module reloading is however still performed on file based modules (not packages) which are located within the document tree where handlers are located. Locations within the document tree are however no longer added to the standard Python module search path automatically as they are maintained within a distinct importer search path. The PythonPath directive MUST not be used to point at directories within the document tree. To have additional directories be searched by the module importer, they should be listed in the mod_python.importer.path option using the PythonOption directive. This is a path similar to how PythonPath argument is supplied, but MUST not reference sys.path nor contain any directories also listed in the standard Python module search path. If an application does not appear to work under the module importer, the old module importer can be reenabled by setting the mod_python.legacy.importer option using the PythonOption directive to the value '*'. This option must be set in the global Apache configuration. +* (`MODPYTHON-152 `_) When in a sub request, when a request is the result of an internal redirect, or when when returning from such a request, the req.main, req.prev and req.next members now correctly return a reference to the original Python request object wrapper first created for the specific request_rec instance rather than creating a new distinct Python request object. This means that any data added explicitly to a request object can be passed between such requests. 
+* (`MODPYTHON-178 `_) When using mod_python.psp, if the PSP file which is the target of the request doesn't actually exist, an apache.HTTP_NOT_FOUND server error is now returned to the client rather than raising a ValueError exception which results in a 500 internal server error. Note that if using SetHandler and the request is against the directory and no DirectoryIndex directive is specified which lists a valid PSP index file, then the same apache.HTTP_NOT_FOUND server error is returned to the client. +* (`MODPYTHON-196 `_) For completeness, added req.server.log_error() and req.connection.log_error(). The latter wraps ap_log_cerror() (when available), allowing client information to be logged along with message from a connection handler. +* (`MODPYTHON-206 `_) The attribute req.used_path_info is now modifiable and can be set from within handlers. This is equivalent to having used the AcceptPathInfo directive. +* (`MODPYTHON-207 `_) The attribute req.args is now modifiable and can be set from within handlers. + +Bug Fixes +--------- + +* (`MODPYTHON-38 `_) Fixed issue when using PSP pages in conjunction with publisher handler or where a PSP error page was being triggered, that form parameters coming from content of a POST request weren't available or only available using a workaround. Specifically, the PSP page will now use any FieldStorage object instance cached as req.form left there by preceding code. +* (`MODPYTHON-43 `_) Nested __auth__() functions in mod_python.publisher now execute in context of globals from the file the function is in and not that of mod_python.publisher itself. +* (`MODPYTHON-47 `_) Fixed mod_python.publisher so it will not return a HTTP Bad Request response when mod_auth is being used to provide Digest authentication. 
+* (`MODPYTHON-63 `_) When handler directives are used within Directory or DirectoryMatch directives where wildcards or regular expressions are used, the handler directory will be set to the shortest directory matched by the directory pattern. Handler directives can now also be used within Files and FilesMatch directives and the handler directory will correctly resolve to the directory corresponding to the enclosing Directory or DirectoryMatch directive, or the directory the .htaccess file is contained in. +* (`MODPYTHON-76 `_) The FilterDispatch callback should not flush the filter if it has already been closed. +* (`MODPYTHON-84 `_) The original change to fix the symlink issue for req.sendfile() was causing problems on Win32, plus code needed to be changed to work with APR 1.2.7. +* (`MODPYTHON-100 `_) When using stacked handlers and a SERVER_RETURN exception was used to return an OK status for that handler, any following handlers weren't being run if appropriate for the phase. +* (`MODPYTHON-109 `_) The Py_Finalize() function was being called on child process shutdown. This was being done though from within the context of a signal handler, which is generally unsafe and would cause the process to lock up. This function is no longer called on child process shutdown. +* (`MODPYTHON-112 `_) The req.phase attribute is no longer overwritten by an input or output filter. The filter.is_input member should be used to determine if a filter is an input or output filter. +* (`MODPYTHON-113 `_) The PythonImport directive now uses the apache.import_module() function to import modules to avoid reloading problems when same module is imported from a handler. +* (`MODPYTHON-114 `_) Fixed race conditions on setting sys.path when the PythonPath directive is being used as well as problems with infinite extension of path. 
+* (`MODPYTHON-120 `_) (`MODPYTHON-121 `_) Fixes to test suite so it will work on virtual hosting environments where localhost doesn't resolve to 127.0.0.1 but the actual IP address of the host. +* (`MODPYTHON-126 `_) When Python*Handler or Python*Filter directive is used inside of a Files directive container, the handler/filter directory value will now correctly resolve to the directory corresponding to any parent Directory directive or the location of the .htaccess file the Files directive is contained in. +* (`MODPYTHON-133 `_) The table object returned by req.server.get_config() was not being populated correctly to be the state of directives set at global scope for the server. +* (`MODPYTHON-134 `_) Setting PythonDebug to Off, wasn't overriding On setting in parent scope. +* (`MODPYTHON-140 `_) The util.redirect() function should be returning server status of apache.DONE and not apache.OK otherwise it will not give desired result if used in non content handler phase or where there are stacked content handlers. +* (`MODPYTHON-147 `_) Stopped directories being added to sys.path multiple times when PythonImport and PythonPath directive used. +* (`MODPYTHON-148 `_) Added missing Apache contants apache.PROXYREQ_RESPONSE and apache.HTTP_UPGRADE_REQUIRED. Also added new constants for Apache magic mime types and values for interpreting the req.connection.keepalive and req.read_body members. +* (`MODPYTHON-150 `_) In a multithread MPM, the apache.init() function could be called more than once for a specific interpreter instance whereas it should only be called once. +* (`MODPYTHON-151 `_) Debug error page returned to client when an exception in a handler occurred wasn't escaping special HTML characters in the traceback or the details of the exception. +* (`MODPYTHON-157 `_) Wrong interpreter name used for fixup handler phase and earlier, when PythonInterpPerDirectory was enabled and request was against a directory but client didn't provide the trailing slash. 
+* (`MODPYTHON-159 `_) Fix FieldStorage class so that it can handle multiline headers. +* (`MODPYTHON-160 `_) Using PythonInterpPerDirective when setting content handler to run dynamically with req.add_handler() would cause Apache to crash. +* (`MODPYTHON-161 `_) Directory argument supplied to req.add_handler() is canonicalized and a trailing slash added automatically. This is needed to ensure that the directory is always in POSIX path style as used by Apache and that convention where directories associated with directives always have trailing slash is adhered to. If this is not done, a different interpreter can be chosen to that expected when the PythonInterpPerDirective is used. +* (`MODPYTHON-166 `_) PythonHandlerModule was not setting up registration of the PythonFixupHandler or PythonAuthenHandler. For the latter this meant that using Require directive with PythonHandlerModule would cause a 500 error and complaint in error log about "No groups file". +* (`MODPYTHON-167 `_) When PythonDebug was On and an exception occurred, the response to the client had a status of 200 when it really should have been a 500 error status indicating that an internal error occurred. A 500 error status was correctly being returned when PythonDebug was Off. +* (`MODPYTHON-168 `_) Fixed psp_parser error when CR is used as a line terminator in psp code. This may occur with some older editors such as GoLive on Mac OS X. +* (`MODPYTHON-175 `_) Fixed problem whereby a main PSP page and an error page triggered from that page both accessing the session object would cause a deadlock. +* (`MODPYTHON-176 `_) Fixed issue whereby PSP code would unlock session object which it had inherited from the caller meaning caller could no longer use it safely. PSP code will now only unlock session if it created it in the first place. +* (`MODPYTHON-179 `_) Fixed the behaviour of req.readlines() when a size hint was provided. Previously, it would always return a single line when a size hint was provided. 
+* (`MODPYTHON-180 `_) Publisher would wrongly output a warning about nothing to publish if req.write() or req.sendfile() used and data not flushed, and then published function returned None. +* (`MODPYTHON-181 `_) Fixed memory leak when mod_python handlers are defined for more than one phase at the same time. +* (`MODPYTHON-182 `_) Fixed memory leak in req.readline(). +* (`MODPYTHON-184 `_) Fix memory leak in apache.make_table(). This was used by util.FieldStorage class so affected all code using forms. +* (`MODPYTHON-185 `_) Fixed segfault in psp.parsestring(src_string) when src_string is empty. +* (`MODPYTHON-187 `_) Table objects could crash in various ways when the value of an item was NULL. This could occur for SCRIPT_FILENAME when the req.subprocess_env table was accessed in the post read request handler phase. +* (`MODPYTHON-189 `_) Fixed representation returned by calling repr() on a table object. +* (`MODPYTHON-191 `_) Session class will no longer accept a normal cookie if a signed cookie was expected. +* (`MODPYTHON-194 `_) Fixed potential memory leak due to not clearing the state of thread state objects before deleting them. +* (`MODPYTHON-195 `_) Fix potential Win32 resource leaks in parent Apache process when process restarts occur. +* (`MODPYTHON-198 `_) Python 2.5 broke nested __auth__/__access__/__auth_realm__ in mod_python.publisher. +* (`MODPYTHON-200 `_) Fixed problem whereby signed and marshalled cookies could not be used at the same time. When expecting marshalled cookie, any signed, but not marshalled cookies will be returned as normal cookies. + + +.. _changes_from_3_2_8: + +Changes from version 3.2.8 +========================== + +New Features +------------ + +* (`MODPYTHON-78 `_) Added support for Apache 2.2. +* (`MODPYTHON-94 `_) New req.is_https() and req.ssl_var_lookup() methods. 
These communicate directly with the Apache mod_ssl module, allowing it to be determined if the connection is using SSL/TLS and what the values of internal ssl variables are. +* (`MODPYTHON-131 `_) The directory used for mutex locks can now be specified at compile time using ./configure --with-mutex-dir value or at run time with PythonOption mod_python.mutex_directory value. +* (`MODPYTHON-137 `_) New req.server.get_options() method. This returns the subset of Python options set at global scope within the Apache configuration. That is, outside of the context of any VirtualHost, Location, Directory or Files directives. +* (`MODPYTHON-145 `_) The number of mutex locks can now be specified at run time with PythonOption mod_python.mutex_locks value. +* (`MODPYTHON-172 `_) Fixed three memory leaks that were found in _apachemodule.parse_qsl, req.readlines and util.cfgtree_walk. + +Improvements +------------ + +* (`MODPYTHON-77 `_) Third party C modules that use the simplified API for the Global Interpreter Lock (GIL), as described in PEP 311, can now be used. The only requirement is that such modules can only be used in the context of the "main_interpreter". +* (`MODPYTHON-119 `_) DbmSession unit test no longer uses the default directory for the dbm file, so the test will not interfere with the user's current apache instance. +* (`MODPYTHON-158 `_) Added additional debugging and logging output for where mod_python cannot initialise itself properly due to Python or mod_python version mismatches or missing Python module code files. + +Bug Fixes +--------- + +* (`MODPYTHON-84 `_) Fixed request.sendfile() bug for symlinked files on Win32. +* (`MODPYTHON-122 `_) Fixed configure problem when using bash 3.1.x. +* (`MODPYTHON-173 `_) Fixed DbmSession to create db file with mode 0640. + + +.. _changes_from_3_2_7: + +Changes from version 3.2.7 +========================== + +Security Fix +------------ + +* (`MODPYTHON-135 `_) Fixed possible directory traversal attack in FileSession. 
The session id is now checked to ensure it only contains valid characters. This check is performed for all sessions derived from the BaseSession class. + +.. _changes_from_3_1_4: + +Changes from version 3.1.4 +========================== + +New Features +------------ + +* New apache.register_cleanup() method. +* New apache.exists_config_define() method. +* New file-based session manager class. +* Session cookie name can be specified. +* The maximum number of mutexes mod_python uses for session locking can now be specified at compile time using configure --with-max-locks. +* New version attribute in mod_python module. +* New test handler testhandler.py has been added. + +Improvements +------------ + +* Autoreload of a module using apache.import_module() now works if modification time for the module is different from the file. Previously, the module was only reloaded if the modification time of the file was more recent. This allows for a more graceful reload if a file with an older modification time needs to be restored from backup. +* Fixed the publisher traversal security issue +* Objects hierarchy a la CherryPy can now be published. +* mod_python.c now logs reason for a 500 error +* Calls to PyErr_Print in mod_python.c are now followed by fflush() +* Using an empty value with PythonOption will unset a PythonOption key. +* req.path_info is now a read/write member. +* Improvements to FieldStorage allow uploading of large files. Uploaded files are now streamed to disk, not to memory. +* Path to flex is now discovered at configuration time or can be specified using configure --with-flex=/path/to/flex. +* sys.argv is now initialized to ["mod_python"] so that modules like numarray and pychart can work properly. + +Bug Fixes +--------- + +* Fixed memory leak which resulted from circular references starting from the request object. +* Fixed memory leak resulting from multiple PythonOption directives. +* Fixed Multiple/redundant interpreter creation problem. 
+* Cookie attributes with attribute names prefixed with $ are now ignored. See Section 4.7 for more information. +* Bug in setting up of config_dir from Handler directives fixed. +* mod_python.publisher will now support modules with the same name but in different directories +* Fixed continual reloading of modules problem +* Fixed big marshalled cookies error. +* Fixed mod_python.publisher extension handling +* mod_python.publisher default index file traversal +* mod_python.publisher loading wrong module and giving no warning/error +* apply_fs_data() now works with "new style" objects +* File descriptor fd closed after ap_send_fd() in req_sendfile() +* Bug in mem_cleanup in MemorySession fixed. +* Fixed bug in _apache._global_lock() which could cause a segfault if the lock index parameter is greater number of mutexes created at mod_python startup. +* Fixed bug where local_ip and local_host in connection object were returning remote_ip and remote_host instead +* Fixed install_dso Makefile rule so it only installs the dso, not the python files +* Potential deadlock in psp cache handling fixed +* Fixed bug where sessions are used outside directive. +* Fixed compile problem on IRIX. ln -s requires both TARGET and LINK_NAME on IRIX. ie. ln -s TARGET LINK_NAME +* Fixed ./configure problem on SuSE Linux 9.2 (x86-64). Python libraries are in lib64/ for this platform. +* Fixed req.sendfile() problem where sendfile(filename) sends the incorrect number of bytes when filename is a symlink. +* Fixed problem where util.FieldStorage was not correctly checking the mime types of POSTed entities +* Fixed conn.local_addr and conn.remote_addr for a better IPv6 support. +* Fixed psp_parser.l to properly escape backslash-n, backslash-t and backslash-r character sequences. +* Fixed segfault bug when accessing some request object members (allowed_methods, allowed_xmethods, content_languages) and some server object members (names, wild_names). 
+* Fixed request.add_handler() segfault bug when adding a handler to an empty handler list. +* Fixed PythonAutoReload directive so that AutoReload can be turned off. +* Fixed connection object read() bug on FreeBSD. +* Fixed potential buffer corruption bug in connection object read(). + +.. _changes_from_2_x: + +Changes from version 2.x +======================== + +* Mod_python 3.0 no longer works with Apache 1.3, only Apache 2.x is supported. +* Mod_python no longer works with Python versions less than 2.2.1 +* Mod_python now supports Apache filters. +* Mod_python now supports Apache connection handlers. +* Request object supports internal_redirect(). +* Connection object has read(), readline() and write(). +* Server object has get_config(). +* Httpdapi handler has been deprecated. +* Zpublisher handler has been deprecated. +* Username is now in req.user instead of req.connection.user diff --git a/Doc/commandline.rst b/Doc/commandline.rst new file mode 100644 index 0000000..e2a31ca --- /dev/null +++ b/Doc/commandline.rst @@ -0,0 +1,134 @@ +.. _cmd: + +****************************** +Command Line Tool - mod_python +****************************** + +.. _cmd-overview: + +Overview of mod_python command +============================== + +mod_python includes a command-line tool named ``mod_python``. The +``mod_python`` command exists to facilitate tasks related to +configuration and management of mod_python. + +The general syntax for the command is ``mod_python `` +where ```` is a separate tool with its own argument requirements. + +.. _cmd-subcommands: + +mod_python command line tool sub-commands +========================================= + +create +------ + +``create`` sub-command creates a simple Apache configuration and a +skeleton directory structure necessary for placement of configuration, +logs and content. It is meant to be executed only once per lifetime of +a project. 
+ +The configuration generated by ``create`` consists of an +:mod:`httpdconf` based version (in Python) which can then be used to +generate an actual Apache configuration (by using the ``genconfig`` +subcommand or simply executing the config file itself). The idea is +that the Apache configuration is always generated and the Python +version is the one meant for editing/adjustments. + +The ``create`` subcommand will create the necessary files and +directories if they do not exist, but will not overwrite any existing +files or directories only producing a warning when a file or directory +already exists. It will abort if the Python version of the +configuration file already exists. + +``create`` requires a single argument: the destination directory, +Apache ``ServerRoot``. + +``create`` has the following command options: + +.. cmdoption:: --listen + + A string describing the port and optional IP address on which the + server is to listen for incoming requests in the form + ``[ip_address:]port`` The argument will be applied to the Apache + ``Listen`` directive as is and therefore must be syntactically + compatible with it. + +.. cmdoption:: --pythonpath + + A colon (``":"``) separated list of paths to be applied to the + :ref:`dir-other-pp` directive. + +.. cmdoption:: --pythonhandler + + The name of the Python handler to use. Applied to the + :ref:`dir-handlers-ph` directive. + +.. cmdoption:: --pythonoption + + An option to be specified in the configuration. Multiple options + are allowed. Applied to the :ref:`dir-other-po` directive. + +.. _cmd-sub-create-example: + +genconfig +--------- + +This sub-command exists to facilitate re-generation of an Apache +configuration from a Python-based one. All it does is run the script, +but its use is recommended because the mod_python command will execute +the correct version of Python under which mod_python was initially +compiled. 
Example:: + + mod_python genconfig /path/to/server_root/httpd_conf.py > /path/to/server_root/httpd.conf + +start +----- + +Starts an Apache instance. Requires a single argument, the path to +Apache configuration file. + +stop +---- + +Stops an Apache instance (using graceful-stop). Requires a single +argument, the path to Apache configuration file. + +restart +------- + +Restarts an Apache instance (using graceful). Requires a single argument, +the path to Apache configuration file. + +version +------- + +This sub-command prints out version and location information about +this mod_python installation, the Apache HTTP Server and Python used +when building this mod_python instance. + +Example +------- + +To create an Apache instance with all the required directories for a +WSGI application which is located in ``/path/to/myapp`` and defined in +``/path/to/myapp/myapp/myapp/wsgi.py``, run the following:: + + mod_python create /path/to/new/server_root \ + --pythonpath=/path/to/my/app \ + --pythonhandler=mod_python.wsgi \ + --pythonoption="mod_python.wsgi.application myapp.wsgi::application" + +The above example will create a Python-based configuration in +``/path/to/new/server_root/conf/httpd_conf.py`` which is a simple +Python script. When executed, the output of the script becomes an +Apache configuration (``create`` will take care of generating the +first Apache config for you). + +You should be able to run this Apache instance by executing:: + + mod_python start /path/to/new/server_root/conf/httpd.conf + + + diff --git a/Doc/conf.py b/Doc/conf.py new file mode 100644 index 0000000..f3a3c40 --- /dev/null +++ b/Doc/conf.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- +# +# Documentation build configuration file +# +# This file is execfile()d with the current directory set to its containing dir. +# +# The contents of this file are pickled, so don't put values in the namespace +# that aren't pickleable (module imports are okay, they're removed automatically). 
+ +import sys, os, time +sys.path.append(os.path.abspath('tools/sphinxext')) + + +# General configuration +# --------------------- + +# General substitutions. +project = 'Mod_python' +copyright = '1990-%s, Apache Software Foundation, Gregory Trubetskoy' % time.strftime('%Y') + +# The default replacements for |version| and |release|. +# +# The short X.Y version. +# version = '2.6' +# The full version, including alpha/beta/rc tags. +# release = '2.6a0' + +# version +import commands +v, r = commands.getoutput("../dist/version.sh").rsplit('.', 1) +version, release = v, v+'.'+r + +# Ignore .rst in Sphinx its self. +exclude_trees = ['tools/sphinx'] + +# If true, '()' will be appended to :func: etc. cross-reference text. +add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +add_module_names = True + + +# Options for HTML output +# ----------------------- + +html_theme = 'default' +html_theme_options = {'collapsiblesidebar': True} + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +html_use_smartypants = True + +# Custom sidebar templates, filenames relative to this file. +html_sidebars = { + 'index': 'indexsidebar.html', +} + +# Output file base name for HTML help builder. +htmlhelp_basename = 'mod_python' + release.replace('.', '') + +# Split the index +html_split_index = True + + +# Options for LaTeX output +# ------------------------ + +# The paper size ('letter' or 'a4'). +latex_paper_size = 'a4' + +# The font size ('10pt', '11pt' or '12pt'). +latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, document class [howto/manual]). 
+_stdauthor = r'Gregory Trubetskoy' +latex_documents = [ + ('contents', 'contents.tex', + 'Mod_python Documentation', _stdauthor, 'manual'), +] + +# Documents to append as an appendix to all manuals. +latex_appendices = ['about', 'license', 'copyright'] + +# Get LaTeX to handle Unicode correctly +latex_elements = {'inputenc': r'\usepackage[utf8x]{inputenc}', 'utf8extra': ''} diff --git a/Doc/contents.rst b/Doc/contents.rst new file mode 100644 index 0000000..2265d7c --- /dev/null +++ b/Doc/contents.rst @@ -0,0 +1,30 @@ +%%%%%%%%%%%%%%%%%%%%%%%%%%% + Mod_python Documentation +%%%%%%%%%%%%%%%%%%%%%%%%%%% + +This document aims to be the only necessary and authoritative source +of information about mod_python, usable as a comprehensive reference, +a user guide and a tutorial all-in-one. + +.. seealso:: + + `Python Language Web Site `_ + for information on the Python language + + `Apache HTTP Server Project Web Site `_ + for information on the Apache server + +.. toctree:: + + introduction.rst + installation.rst + tutorial.rst + pythonapi.rst + directives.rst + handlers.rst + commandline.rst + ssi.rst + changes.rst + license.rst + about.rst + copyright.rst diff --git a/Doc/copyright.rst b/Doc/copyright.rst new file mode 100644 index 0000000..f770901 --- /dev/null +++ b/Doc/copyright.rst @@ -0,0 +1,13 @@ +********* +Copyright +********* + +Mod_python and this documentation is: + +Copyright © 2000, 2001, 2013 Gregory Trubetskoy + +Copyright © 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + +------- + +See :ref:`history-and-license` for complete license and permissions information. diff --git a/Doc/directives.rst b/Doc/directives.rst new file mode 100644 index 0000000..87fe330 --- /dev/null +++ b/Doc/directives.rst @@ -0,0 +1,987 @@ + +.. |br| raw:: html + +
+ +.. _directives: + +******************************* +Apache Configuration Directives +******************************* + +.. _dir-handlers: + +Request Handlers +================ + +.. _dir-handlers-syn: + +Python*Handler Directive Syntax +------------------------------- + +.. index:: + single: Python*Handler Syntax + + +All request handler directives have the following syntax: + +``Python*Handler handler [handler ...] [ | .ext [.ext ...] ]`` + +Where *handler* is a callable object that accepts a single argument - +request object, and *.ext* is an optional file extension. + +Multiple handlers can be specified on a single line, in which case +they will be called sequentially, from left to right. Same handler +directives can be specified multiple times as well, with the same +result - all handlers listed will be executed sequentially, from first +to last. + +If any handler in the sequence returns a value other than +``apache.OK`` or ``apache.DECLINED``, then execution of all subsequent +handlers for that phase is aborted. What happens when either +``apache.OK`` or ``apache.DECLINED`` is returned is dependent on which +phase is executing. + +Note that prior to mod_python 3.3, if any handler in the sequence, no +matter which phase was executing, returned a value other than +``apache.OK``, then execution of all subsequent handlers for that phase +was aborted. + +The list of handlers can optionally be followed by a ``|`` followed +by one or more file extensions. This would restrict the execution of +the handler to those file extensions only. This feature only works for +handlers executed after the trans phase. + +A *handler* has the following syntax: + +``module[::object]`` + +Where *module* can be a full module name (package dot notation is +accepted) or an actual path to a module code file. The module is loaded +using the mod_python module importer as implemented by the +``apache.import_module()`` function. 
Reference should be made to +the documentation of that function for further details of how module +importing is managed. + +The optional *object* is the name of an object inside the module. +Object can also contain dots, in which case it will be resolved from +left to right. During resolution, if mod_python encounters an object +of type ````, it will try instantiating it passing it a single +argument, a request object. + +If no object is specified, then it will default to the directive of +the handler, all lower case, with the word ``'python'`` +removed. E.g. the default object for PythonAuthenHandler would be +authenhandler. + +Example:: + + PythonAuthzHandler mypackage.mymodule::checkallowed + +For more information on handlers, see :ref:`pyapi-handler`. + +.. note:: The ``'::'`` was chosen for performance reasons. In order + for Python to use objects inside modules, the modules first need to + be imported. Having the separator as simply a ``'.'``, would + considerably complicate process of sequentially evaluating every + word to determine whether it is a package, module, class etc. Using + the (admittedly un-Python-like) ``'::'`` removes the time-consuming + work of figuring out where the module part ends and the object + inside of it begins, resulting in a modest performance gain. + +.. index:: + pair: phase; order + +The handlers in this document are listed in order in which phases are +processed by Apache. + +.. _dir-handlers-pp: + +Python*Handlers and Python path +------------------------------- + +.. index:: + pair: pythonpath, Python*Handler + +If a ``Python*Handler`` directive is specified in a *directory section* +(i.e. inside a ```` or +```` or in an ``.htaccess`` file), +then this directory is automatically prepended to the Python path +(``sys.path``) *unless* Python path is specified explicitly with the +``PythonPath`` directive. + +If a ``Python*Handler`` directive is specified in a *location section* +(i.e. 
inside ```` or +````), then no path modification is done +automatically and in most cases a ``PythonPath`` directive is required +to augment the path so that the handler module can be imported. + +Also for ``Python*Handlers`` inside a location section mod_python +disables the phase of the request that maps the URI to a file on the +filesystem (``ap_hook_map_to_storage``). This is because there is +usually no link between path specified in ```` and the +filesystem, while attempting to map to a filesystem location results +in unnecessary and expensive filesystem calls. Note that an important +side-effect of this is that once a request URI has been matched to a +```` containing a mod_python handler, all ```` +and ```` directives and their contents are ignored for +this request. + +.. _dir-handlers-prrh: + +PythonPostReadRequestHandler +---------------------------- + +.. index:: + single: PythonPostReadRequestHandler + +`Syntax: `_ *Python\*Handler Syntax* |br| +`Context: `_ server config, virtual host |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +This handler is called after the request has been read but before any +other phases have been processed. This is useful to make decisions +based upon the input header fields. + +Where multiple handlers are specified, if any handler in the sequence +returns a value other than ``apache.OK`` or ``apache.DECLINED``, then +execution of all subsequent handlers for this phase is aborted. + +.. note:: + + When this phase of the request is processed, the URI has not yet + been translated into a path name, therefore this directive could + never be executed by Apache if it were specified within + ````, ````, ```` directives or in an + :file:`.htaccess` file. The only place this directive is allowed is + the main configuration file, and the code for it will execute in + the main interpreter. And because this phase happens before any + identification of the type of content being requested is done + (i.e. 
is this a python program or a gif?), the python routine + specified with this handler will be called for *ALL* requests on + this server (not just python programs), which is an important + consideration if performance is a priority. + + + +.. _dir-handlers-th: + +PythonTransHandler +------------------ + +.. index:: + single: PythonTransHandler + + + +`Syntax: `_ *Python\*Handler Syntax* |br| +`Context: `_ server config, virtual host |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + +This handler allows for an opportunity to translate the URI into +an actual filename, before the server's default rules (Alias +directives and the like) are followed. + +Where multiple handlers are specified, if any handler in the sequence +returns a value other than ``apache.DECLINED``, then execution of all +subsequent handlers for this phase is aborted. + +.. note:: + + At the time when this phase of the request is being processed, the + URI has not been translated into a path name, therefore this + directive will never be executed by Apache if specified within + ````, ````, ```` directives or in an + :file:`.htaccess` file. The only place this can be specified is the + main configuration file, and the code for it will execute in the + main interpreter. + + +.. _dir-handlers-hph: + +PythonHeaderParserHandler +------------------------- + +.. index:: + single: PythonHeaderParserHandler + +`Syntax: `_ *Python\*Handler Syntax* |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +This handler is called to give the module a chance to look at the +request headers and take any appropriate specific actions early in the +processing sequence. + +Where multiple handlers are specified, if any handler in the sequence +returns a value other than ``apache.OK`` or ``apache.DECLINED``, then +execution of all subsequent handlers for this phase is aborted. + + +.. 
_dir-handlers-pih: + +PythonInitHandler +------------------ + +.. index:: + single: PythonInitHandler + + +`Syntax: `_ *Python\*Handler Syntax* |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +This handler is the first handler called in the request processing +phases that is allowed both inside and outside :file:`.htaccess` and +directory. + +Where multiple handlers are specified, if any handler in the sequence +returns a value other than ``apache.OK`` or ``apache.DECLINED``, then +execution of all subsequent handlers for this phase is aborted. + +This handler is actually an alias to two different handlers. When +specified in the main config file outside any directory tags, it is an +alias to ``PostReadRequestHandler``. When specified inside directory +(where ``PostReadRequestHandler`` is not allowed), it aliases to +``PythonHeaderParserHandler``. + +\*(This idea was borrowed from mod_perl) + + +.. _dir-handlers-ach: + +PythonAccessHandler +------------------- + +.. index:: + single: PythonAccessHandler + + +`Syntax: `_ *Python\*Handler Syntax* |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +This routine is called to check for any module-specific restrictions +placed upon the requested resource. + +Where multiple handlers are specified, if any handler in the sequence +returns a value other than ``apache.OK`` or ``apache.DECLINED``, then +execution of all subsequent handlers for this phase is aborted. + +For example, this can be used to restrict access by IP number. To do +so, you would return ``HTTP_FORBIDDEN`` or some such to indicate +that access is not allowed. + +.. _dir-handlers-auh: + +PythonAuthenHandler +------------------- + +.. 
index:: + single: PythonAuthenHandler + + +`Syntax: `_ *Python\*Handler Syntax* |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +This routine is called to check the authentication information sent +with the request (such as looking up the user in a database and +verifying that the [encrypted] password sent matches the one in the +database). + +Where multiple handlers are specified, if any handler in the sequence +returns a value other than ``apache.DECLINED``, then execution of all +subsequent handlers for this phase is aborted. + +To obtain the username, use ``req.user``. To obtain the password +entered by the user, use the :meth:`request.get_basic_auth_pw` function. + +A return of ``apache.OK`` means the authentication succeeded. A return +of ``apache.HTTP_UNAUTHORIZED`` with most browsers will bring up the +password dialog box again. A return of ``apache.HTTP_FORBIDDEN`` will +usually show the error on the browser and not bring up the password +dialog again. ``HTTP_FORBIDDEN`` should be used when authentication +succeeded, but the user is not permitted to access a particular URL. + +An example authentication handler might look like this:: + + def authenhandler(req): + + pw = req.get_basic_auth_pw() + user = req.user + if user == "spam" and pw == "eggs": + return apache.OK + else: + return apache.HTTP_UNAUTHORIZED + +.. note:: + + :meth:`request.get_basic_auth_pw` must be called prior to using the + :attr:`request.user` value. Apache makes no attempt to decode the + authentication information unless :meth:`request.get_basic_auth_pw` is called. + + +.. _dir-handlers-auzh: + +PythonAuthzHandler +------------------- + +.. 
index:: + single: PythonAuthzHandler + + +`Syntax: `_ *Python\*Handler Syntax* |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +This handler runs after AuthenHandler and is intended for checking +whether a user is allowed to access a particular resource. But more +often than not it is done right in the AuthenHandler. + +Where multiple handlers are specified, if any handler in the sequence +returns a value other than ``apache.DECLINED``, then execution of all +subsequent handlers for this phase is aborted. + +.. _dir-handlers-tph: + +PythonTypeHandler +------------------- + +.. index:: + single: PythonTypeHandler + + +`Syntax: `_ *Python\*Handler Syntax* |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +This routine is called to determine and/or set the various document +type information bits, like Content-type (via ``r->content_type``), +language, et cetera. + +Where multiple handlers are specified, if any handler in the sequence +returns a value other than ``apache.DECLINED``, then execution of all +subsequent handlers for this phase is aborted. + + +.. _dir-handlers-fuh: + +PythonFixupHandler +------------------- + +.. index:: + single: PythonFixupHandler + + +`Syntax: `_ *Python\*Handler Syntax* |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +This routine is called to perform any module-specific fixing of header +fields, et cetera. It is invoked just before any content-handler. + +Where multiple handlers are specified, if any handler in the sequence +returns a value other than ``apache.OK`` or ``apache.DECLINED``, then +execution of all subsequent handlers for this phase is aborted. + +.. _dir-handlers-ph: + +PythonHandler +------------- + +.. 
index:: + single: PythonHandler + + +`Syntax: `_ *Python\*Handler Syntax* |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +This is the main request handler. Many applications will only provide +this one handler. + +Where multiple handlers are specified, if any handler in the sequence +returns a status value other than ``apache.OK`` or +``apache.DECLINED``, then execution of subsequent handlers for the phase +are skipped and the return status becomes that for the whole content +handler phase. If all handlers are run, the return status of the final +handler is what becomes the return status of the whole content handler +phase. Where that final status is ``apache.DECLINED``, Apache will fall +back to using the ``default-handler`` and attempt to serve up the target +as a static file. + +.. _dir-handlers-plh: + +PythonLogHandler +---------------- + +.. index:: + single: PythonLogHandler + + +`Syntax: `_ *Python\*Handler Syntax* |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +This routine is called to perform any module-specific logging +activities. + +Where multiple handlers are specified, if any handler in the sequence +returns a value other than ``apache.OK`` or ``apache.DECLINED``, then +execution of all subsequent handlers for this phase is aborted. + +.. _dir-handlers-pch: + +PythonCleanupHandler +-------------------- + +.. index:: + single: PythonCleanupHandler + + +`Syntax: `_ *Python\*Handler Syntax* |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +This is the very last handler, called just before the request object +is destroyed by Apache. + +Unlike all the other handlers, the return value of this handler is +ignored. 
Any errors will be logged to the error log, but will not be +sent to the client, even if PythonDebug is On. + +This handler is not a valid argument to the ``rec.add_handler()`` +function. For dynamic clean up registration, use +``req.register_cleanup()``. + +Once cleanups have started, it is not possible to register more of +them. Therefore, ``req.register_cleanup()`` has no effect within this +handler. + +Cleanups registered with this directive will execute *after* cleanups +registered with ``req.register_cleanup()``. + +.. _dir-filter: + +Filters +======= + +.. _dir-filter-if: + +PythonInputFilter +----------------- + +.. index:: + single: PythonInputFilter + + +`Syntax: `_ PythonInputFilter handler name |br| +`Context: `_ server config |br| +`Module: `_ mod_python.c |br| + + +Registers an input filter *handler* under name *name*. *Handler* is a +module name optionally followed ``::`` and a callable object name. If +callable object name is omitted, it will default to +``'inputfilter'``. *Name* is the name under which the filter is +registered, by convention filter names are usually in all caps. + +The *module* referred to by the handler can be a full module name +(package dot notation is accepted) or an actual path to a module code file. +The module is loaded using the mod_python module importer as implemented by +the :func:`apache.import_module` function. Reference should be made to the +documentation of that function for further details of how module importing +is managed. + +To activate the filter, use the ``AddInputFilter`` directive. + +.. _dir-filter-of: + +PythonOutputFilter +------------------ + +.. index:: + single: PythonOutputFilter + + +`Syntax: `_ PythonOutputFilter handler name |br| +`Context: `_ server config |br| +`Module: `_ mod_python.c |br| + + +Registers an output filter *handler* under name *name*. *handler* is a +module name optionally followed ``::`` and a callable object name. 
If +callable object name is omitted, it will default to +``'outputfilter'``. *name* is the name under which the filter is +registered, by convention filter names are usually in all caps. + +The *module* referred to by the handler can be a full module name +(package dot notation is accepted) or an actual path to a module code file. +The module is loaded using the mod_python module importer as implemented by +the :func:`apache.import_module` function. Reference should be made to the +documentation of that function for further details of how module importing +is managed. + +To activate the filter, use the ``AddOutputFilter`` directive. + +.. _dir-conn: + +Connection Handler +================== + +.. _dir-conn-ch: + +PythonConnectionHandler +----------------------- + +.. index:: + single: PythonConnectionHandler + + +`Syntax: `_ PythonConnectionHandler handler |br| +`Context: `_ server config |br| +`Module: `_ mod_python.c |br| + + +Specifies that the connection should be handled with *handler* +connection handler. *handler* will be passed a single argument - +the connection object. + +*Handler* is a module name optionally followed ``::`` and a +callable object name. If callable object name is omitted, it will +default to ``'connectionhandler'``. + +The *module* can be a full module name (package dot notation is +accepted) or an absolute path to a module code file. The module is loaded +using the mod_python module importer as implemented by the +:func:`apache.import_module` function. Reference should be made to the +documentation of that function for further details of how module importing +is managed. + +.. _dir-other: + +Other Directives +================== + +.. _dir-other-epd: + +PythonEnablePdb +--------------- + +.. 
index:: + single: PythonEnablePdb + + +`Syntax: `_ PythonEnablePdb {On, Off} |br| +`Default: `_ PythonEnablePdb Off |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +When On, mod_python will execute the handler functions within the +Python debugger pdb using the :func:`pdb.runcall` function. + +Because pdb is an interactive tool, start httpd from the command line +with the ``-DONE_PROCESS`` option when using this directive. As soon as +your handler code is entered, you will see a Pdb prompt allowing you +to step through the code and examine variables. + +.. _dir-other-pd: + +PythonDebug +----------- + +.. index:: + single: PythonDebug + +`Syntax: `_ PythonDebug {On, Off} |br| +`Default: `_ PythonDebug Off |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +Normally, the traceback output resulting from uncaught Python errors +is sent to the error log. With PythonDebug On directive specified, the +output will be sent to the client (as well as the log), except when +the error is :exc:`IOError` while writing, in which case it will go +to the error log. + +This directive is very useful during the development process. It is +recommended that you do not use it in a production environment as it may +reveal to the client unintended, possibly sensitive security +information. + +.. _dir-other-pimp: + +PythonImport +------------ + +.. index:: + single: PythonImport + +`Syntax: `_ PythonImport *module* *interpreter_name* |br| +`Context: `_ server config |br| +`Module: `_ mod_python.c |br| + + +Tells the server to import the Python module module at process startup +under the specified interpreter name. The import takes place at child +process initialization, so the module will actually be imported once for +every child process spawned. 
+ +The *module* can be a full module name (package dot notation is +accepted) or an absolute path to a module code file. The module is loaded +using the mod_python module importer as implemented by the +:func:`apache.import_module` function. Reference should be made to +the documentation of that function for further details of how module +importing is managed. + +The ``PythonImport`` directive is useful for initialization tasks that +could be time consuming and should not be done at the time of processing a +request, e.g. initializing a database connection. Where such initialization +code could fail and cause the importing of the module to fail, it should be +placed in its own function and the alternate syntax used: + +``PythonImport *module::function* *interpreter_name*`` + +The named function will be called only after the module has been imported +successfully. The function will be called with no arguments. + +.. note:: + + At the time when the import takes place, the configuration is not + completely read yet, so all other directives, including + PythonInterpreter have no effect on the behavior of modules + imported by this directive. Because of this limitation, the + interpreter must be specified explicitly, and must match the name + under which subsequent requests relying on this operation will + execute. If you are not sure under what interpreter name a request + is running, examine the :attr:`request.interpreter` member of the request + object. + +See also Multiple Interpreters. + +.. _dir-other-ipd: + +PythonInterpPerDirectory +------------------------ + +.. 
index:: + single: PythonInterpPerDirectory + +`Syntax: `_ PythonInterpPerDirectory {On, Off} |br| +`Default: `_ PythonInterpPerDirectory Off |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +Instructs mod_python to name subinterpreters using the directory of +the file in the request (``req.filename``) rather than the the +server name. This means that scripts in different directories will +execute in different subinterpreters as opposed to the default policy +where scripts in the same virtual server execute in the same +subinterpreter, even if they are in different directories. + +For example, assume there is a +:file:`/directory/subdirectory`. :file:`/directory` has an +``.htaccess`` file with a ``PythonHandler`` directive. +:file:`/directory/subdirectory` doesn't have an ``.htaccess``. By +default, scripts in :file:`/directory` and +:file:`/directory/subdirectory` would execute in the same interpreter +assuming both directories are accessed via the same virtual +server. With ``PythonInterpPerDirectory``, there would be two +different interpreters, one for each directory. + +.. note:: + + In early phases of the request prior to the URI translation + (PostReadRequestHandler and TransHandler) the path is not yet known + because the URI has not been translated. During those phases and + with PythonInterpPerDirectory on, all python code gets executed in + the main interpreter. This may not be exactly what you want, but + unfortunately there is no way around this. + + +.. seealso:: + + :ref:`pyapi-interps` + for more information + + +.. _dir-other-ipdv: + +PythonInterpPerDirective +------------------------ + +.. 
index:: + single: PythonInterpPerDirective + +`Syntax: `_ PythonInterpPerDirective {On, Off} |br| +`Default: `_ PythonInterpPerDirective Off |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +Instructs mod_python to name subinterpreters using the directory in +which the Python*Handler directive currently in effect was +encountered. + +For example, assume there is a +:file:`/directory/subdirectory`. :file:`/directory` has an ``.htaccess`` +file with a ``PythonHandler`` directive. :file:`/directory/subdirectory` +has another :file:`.htaccess` file with another ``PythonHandler``. By +default, scripts in :file:`/directory` and +:file:`/directory/subdirectory` would execute in the same interpreter +assuming both directories are in the same virtual server. With +``PythonInterpPerDirective``, there would be two different interpreters, +one for each directive. + +.. seealso:: + + :ref:`pyapi-interps` + for more information + +.. _dir-other-pi: + +PythonInterpreter +----------------- + +.. index:: + single: PythonInterpreter + +`Syntax: `_ PythonInterpreter *name* |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +Forces mod_python to use interpreter named *name*, overriding the +default behaviour or behaviour dictated by a :ref:`dir-other-ipd` or +:ref:`dir-other-ipdv` direcive. + +This directive can be used to force execution that would normally +occur in different subinterpreters to run in the same one. When +specified in the DocumentRoot, it forces the whole server to run in one +subinterpreter. + +.. seealso:: + + :ref:`pyapi-interps` + for more information + +.. _dir-other-phm: + +PythonHandlerModule +------------------- + +.. 
index:: + single: PythonHandlerModule + +`Syntax: `_ PythonHandlerModule *module* |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +PythonHandlerModule can be used an alternative to Python*Handler +directives. The module specified in this handler will be searched for +existence of functions matching the default handler function names, +and if a function is found, it will be executed. + +For example, instead of:: + + PythonAuthenHandler mymodule + PythonHandler mymodule + PythonLogHandler mymodule + + +one can simply use:: + + PythonHandlerModule mymodule + + +.. _dir-other-par: + +PythonAutoReload +---------------- + +.. index:: + single: PythonAutoReload + +`Syntax: `_ PythonAutoReload {On, Off} |br| +`Default: `_ PythonAutoReload On |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +If set to Off, instructs mod_python not to check the modification date +of the module file. + +By default, mod_python checks the time-stamp of the file and reloads +the module if the module's file modification date is later than the +last import or reload. This way changed modules get automatically +reimported, eliminating the need to restart the server for every +change. + +Disabling autoreload is useful in production environment where the +modules do not change; it will save some processing time and give a +small performance gain. + +.. _dir-other-pomz: + +PythonOptimize +-------------- + +.. index:: + single: PythonOptimize + +`Syntax: `_ PythonOptimize {On, Off} |br| +`Default: `_ PythonOptimize Off |br| +`Context: `_ server config |br| +`Module: `_ mod_python.c |br| + + +Enables Python optimization. Same as the Python ``-O`` option. + +.. _dir-other-po: + +PythonOption +------------ + +.. 
index:: + single: PythonOption + +`Syntax: `_ PythonOption key [value] |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +Assigns a key value pair to a table that can be later retrieved by the +:meth:`request.get_options` function. This is useful to pass information +between the apache configuration files (:file:`httpd.conf`, +:file:`.htaccess`, etc) and the Python programs. If the value is omitted or empty (``""``), +then the key is removed from the local configuration. + +Reserved PythonOption Keywords +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Some ``PythonOption`` keywords are used for configuring various aspects of +mod_python. Any keyword starting with mod_python.\* should be considered +as reserved for internal mod_python use. + +Users are encouraged to use their own namespace qualifiers when creating +add-on modules, and not pollute the global namespace. + +The following PythonOption keys are currently used by mod_python. 
+ +| mod_python.mutex_directory +| mod_python.mutex_locks +| mod_python.psp.cache_database_filename +| mod_python.session.session_type +| mod_python.session.cookie_name +| mod_python.session.application_domain +| mod_python.session.application_path +| mod_python.session.database_directory +| mod_python.dbm_session.database_filename +| mod_python.dbm_session.database_directory +| mod_python.file_session.enable_fast_cleanup +| mod_python.file_session.verify_session_timeout +| mod_python.file_session.cleanup_grace_period +| mod_python.file_session.cleanup_time_limit +| mod_python.file_session.database_directory +| mod_python.wsgi.application +| mod_python.wsgi.base_uri + +| session *Deprecated in 3.3, use mod_python.session.session_type* +| ApplicationPath *Deprecated in 3.3, use mod_python.session.application_path* +| session_cookie_name *Deprecated in 3.3, use mod_python.session.cookie_name* +| session_directory *Deprecated in 3.3, use mod_python.session.database_directory* +| session_dbm *Deprecated in 3.3, use mod_python.dbm_session.database_filename* +| session_cleanup_time_limit *Deprecated in 3.3, use mod_python.file_session.cleanup_time_limit* +| session_fast_cleanup *Deprecated in 3.3, use mod_python.file_session.enable_fast_cleanup* +| session_grace_period *Deprecated in 3.3, use mod_python.file_session.cleanup_grace_period* +| session_verify_cleanup *Deprecated in 3.3, use mod_python.file_session.cleanup_session_timeout* +| PSPDbmCache *Deprecated in 3.3, use mod_python.psp.cache_database_filename* + + +.. _dir-other-pp: + +PythonPath +---------- + +.. index:: + single: PythonPath + +`Syntax: `_ PythonPath *path* |br| +`Context: `_ server config, virtual host, directory, htaccess |br| +`Override: `_ not None |br| +`Module: `_ mod_python.c |br| + + +PythonPath directive sets the PythonPath. 
The path must be specified +in Python list notation, e.g.:: + + PythonPath "['/usr/local/lib/python2.0', '/usr/local/lib/site_python', '/some/other/place']" + +The path specified in this directive will replace the path, not add to +it. However, because the value of the directive is evaled, to append a +directory to the path, one can specify something like:: + + PythonPath "sys.path+['/mydir']" + +Mod_python tries to minimize the number of evals associated with the +PythonPath directive because evals are slow and can negatively impact +performance, especially when the directive is specified in an +:file:`.htaccess` file which gets parsed at every hit. Mod_python will +remember the arguments to the PythonPath directive in the un-evaled +form, and before evaling the value it will compare it to the +remembered value. If the value is the same, no action is +taken. Because of this, you should not rely on the directive as a way +to restore the pythonpath to some value if your code changes it. + +When multiple PythonPath directives are specified, the effect is not +cumulative, last directive will override all previous ones. + +.. note:: + + This directive should not be used as a security measure since the + Python path is easily manipulated from within the scripts. + + + + + + + + + diff --git a/Doc/handlers.rst b/Doc/handlers.rst new file mode 100644 index 0000000..7aa3114 --- /dev/null +++ b/Doc/handlers.rst @@ -0,0 +1,489 @@ + +.. _handlers: + +***************** +Standard Handlers +***************** + +.. _hand-pub: + +Publisher Handler +================= + +.. index:: + pair: publisher; handler + +The ``publisher`` handler is a good way to avoid writing your own +handlers and focus on rapid application development. It was inspired +by `Zope `_ ZPublisher. + +.. 
_hand-pub-intro: + +Introduction +------------ + +To use the handler, you need the following lines in your configuration::: + + + SetHandler mod_python + PythonHandler mod_python.publisher + + + +This handler allows access to functions and variables within a module +via URL's. For example, if you have the following module, called +:file:`hello.py`::: + + """ Publisher example """ + + def say(req, what="NOTHING"): + return "I am saying %s" % what + + +A URL ``http://www.mysite.com/hello.py/say`` would return +``'I am saying NOTHING``. A URL +``http://www.mysite.com/hello.py/say?what=hello`` would +return ``'I am saying hello``. + + +.. _hand-pub-alg: + +The Publishing Algorithm +------------------------ + +The Publisher handler maps a URI directly to a Python variable or +callable object, then, respectively, returns it's string +representation or calls it returning the string representation of the +return value. + +.. index:: + pair: publisher; traversal + +.. _hand-pub-alg-trav: + +Traversal +^^^^^^^^^ + +The Publisher handler locates and imports the module specified in the +URI. The module location is determined from the :attr:`request.filename` +attribute. Before importing, the file extension, if any, is +discarded. + +If :attr:`request.filename` is empty, the module name defaults to +``'index'``. + +Once module is imported, the remaining part of the URI up to the +beginning of any query data (a.k.a. :const:`PATH_INFO`) is used to find an +object within the module. The Publisher handler *traverses* the +path, one element at a time from left to right, mapping the elements +to Python object within the module. + +If no ``path_info`` was given in the URL, the Publisher handler will use +the default value of ``'index'``. If the last element is an object inside +a module, and the one immediately preceding it is a directory +(i.e. no module name is given), then the module name will also default +to ``'index'``. 
+ +The traversal will stop and :const:`HTTP_NOT_FOUND` will be returned to +the client if: + + +* Any of the traversed object's names begin with an underscore + (``'_'``). Use underscores to protect objects that should not be + accessible from the web. + +* A module is encountered. Published objects cannot be modules for + security reasons. + + +If an object in the path could not be found, :const:`HTTP_NOT_FOUND` +is returned to the client. + +For example, given the following configuration::: + + DocumentRoot /some/dir + + + SetHandler mod_python + PythonHandler mod_python.publisher + + +And the following :file:`/some/dir/index.py` file::: + + def index(req): + return "We are in index()" + + def hello(req): + return "We are in hello()" + + +Then: + +* http://www.somehost/index/index will return ``'We are in index()'`` + +* http://www.somehost/index/ will return ``'We are in index()'`` + +* http://www.somehost/index/hello will return ``'We are in hello()'`` + +* http://www.somehost/hello will return ``'We are in hello()'`` + +* http://www.somehost/spam will return ``'404 Not Found'`` + + +.. _hand-pub-alg-args: + + +Argument Matching and Invocation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Once the destination object is found, if it is callable and not a +class, the Publisher handler will get a list of arguments that the +object expects. This list is compared with names of fields from HTML +form data submitted by the client via ``POST`` or +``GET``. Values of fields whose names match the names of callable +object arguments will be passed as strings. Any fields whose names do +not match the names of callable argument objects will be silently dropped, +unless the destination callable object has a ``**kwargs`` style +argument, in which case fields with unmatched names will be passed in the +``**kwargs`` argument. + +If the destination is not callable or is a class, then its string +representation is returned to the client. + + +.. 
index:: + pair: publisher; authentication + +.. _hand-pub-alg-auth: + +Authentication +^^^^^^^^^^^^^^ + +The publisher handler provides simple ways to control access to +modules and functions. + +At every traversal step, the Publisher handler checks for presence of +``__auth__`` and ``__access__`` attributes (in this order), as +well as ``__auth_realm__`` attribute. + +If ``__auth__`` is found and it is callable, it will be called +with three arguments: the ``request`` object, a string containing +the user name and a string containing the password. If the return +value of +``__auth__`` is false, then :const:`HTTP_UNAUTHORIZED` is +returned to the client (which will usually cause a password dialog box +to appear). + +If :meth:`__auth__` is a dictionary, then the user name will be +matched against the key and the password against the value associated +with this key. If the key and password do not match, +:const:`HTTP_UNAUTHORIZED` is returned. Note that this requires +storing passwords as clear text in source code, which is not very secure. + +``__auth__`` can also be a constant. In this case, if it is false +(i.e. ``None``, ``0``, ``""``, etc.), then +:const:`HTTP_UNAUTHORIZED` is returned. + +If there exists an ``__auth_realm__`` string, it will be sent +to the client as Authorization Realm (this is the text that usually +appears at the top of the password dialog box). + +If ``__access__`` is found and it is callable, it will be called +with two arguments: the ``request`` object and a string containing +the user name. If the return value of ``__access__`` is false, then +:const:`HTTP_FORBIDDEN` is returned to the client. + +If ``__access__`` is a list, then the user name will be matched +against the list elements. If the user name is not in the list, +:const:`HTTP_FORBIDDEN` is returned. + +Similarly to ``__auth__``, ``__access__`` can be a constant. 
+ +In the example below, only user ``'eggs'`` with password ``'spam'`` +can access the ``hello`` function::: + + __auth_realm__ = "Members only" + + def __auth__(req, user, passwd): + + if user == "eggs" and passwd == "spam" or \ + user == "joe" and passwd == "eoj": + return 1 + else: + return 0 + + def __access__(req, user): + if user == "eggs": + return 1 + else: + return 0 + + def hello(req): + return "hello" + +Here is the same functionality, but using an alternative technique::: + + __auth_realm__ = "Members only" + __auth__ = {"eggs":"spam", "joe":"eoj"} + __access__ = ["eggs"] + + def hello(req): + return "hello" + + +Since functions cannot be assigned attributes, to protect a function, +an ``__auth__`` or ``__access__`` function can be defined within +the function, e.g.::: + + def sensitive(req): + + def __auth__(req, user, password): + if user == 'spam' and password == 'eggs': + # let them in + return 1 + else: + # no access + return 0 + + # something involving sensitive information + return 'sensitive information` + +Note that this technique will also work if ``__auth__`` or +``__access__`` is a constant, but will not work is they are +a dictionary or a list. + +The ``__auth__`` and ``__access__`` mechanisms exist +independently of the standard +:ref:`dir-handlers-auh`. It +is possible to use, for example, the handler to authenticate, then the +``__access__`` list to verify that the authenticated user is +allowed to a particular function. + +.. note:: + + In order for mod_python to access ``__auth__``, the module + containing it must first be imported. Therefore, any module-level + code will get executed during the import even if + ``__auth__`` is false. To truly protect a module from being + accessed, use other authentication mechanisms, e.g. the Apache + ``mod_auth`` or with a mod_python :ref:`dir-handlers-auh`. + + +.. 
_hand-pub-form: + +Form Data +--------- + +In the process of matching arguments, the Publisher handler creates an +instance of :ref:`pyapi-util-fstor`. +A reference to this instance is stored in an attribute \member{form} +of the ``request`` object. + +Since a ``FieldStorage`` can only be instantiated once per +request, one must not attempt to instantiate ``FieldStorage`` when +using the Publisher handler and should use +:attr:`request.form` instead. + + +.. _hand-wsgi: + +WSGI Handler +============ + +.. index:: + pair: WSGI; handler + +WSGI handler can run WSGI applications as described in :pep:`333`. + +Assuming there exists the following minimal WSGI app residing in a file named +``mysite/wsgi.py`` in directory ``/path/to/mysite`` (so that the full +path to ``wsgi.py`` is ``/path/to/mysite/mysite/wsgi.py``):: + + def application(environ, start_response): + status = '200 OK' + output = 'Hello World!' + + response_headers = [('Content-type', 'text/plain'), + ('Content-Length', str(len(output)))] + start_response(status, response_headers) + + return [output] + +It can be executed using the WSGI handler by adding the following to the +Apache configuration:: + + PythonHandler mod_python.wsgi + PythonOption mod_python.wsgi.application mysite.wsgi + PythonPath "sys.path+['/path/to/mysite']" + +The above configuration will import a module named ``mysite.wsgi`` and +will look for an ``application`` callable in the module. + +An alternative name for the callable can be specified by appending it +to the module name separated by ``'::'``, e.g.:: + + PythonOption mod_python.wsgi.application mysite.wsgi::my_application + +If you would like your application to appear under a base URI, it can +be specified by wrapping your configuration in a ```` +block. 
It can also be specified via the ``mod_python.wsgi.base_uri`` +option, but the ```` method is recommended, also because it +has a side-benefit of informing mod_python to skip the map-to-storage +processing phase and thereby improving performance. + +For example, if you would like the above application to appear under +``'/wsgiapps'``, you could specify:: + + + PythonHandler mod_python.wsgi + PythonOption mod_python.wsgi.application mysite.wsgi + PythonPath "sys.path+['/path/to/mysite']" + + +With the above configuration, content formerly under +``http://example.com/hello`` becomes available under +``http://example.com/wsgiapps/hello``. + +If both ```` and ``mod_python.wsgi.base_uri`` exist, then +``mod_python.wsgi.base_uri`` takes precedence. +``mod_python.wsgi.base_uri`` cannot be ``'/'`` or end with a +``'/'``. "Root" (or no base_uri) is a blank string, which is the +default. (Note that it is allowed for ```` path to be +``"/"`` or have a trailing slash, it will automatically be removed by +mod_python before computing ``PATH_INFO``). + + +.. index:: + pair: WSGI; SCRIPT_NAME + pair: WSGI; PATH_INFO + +.. note:: + + :pep:`333` describes ``SCRIPT_NAME`` and ``PATH_INFO`` environment + variables which are core to the specification. Most WSGI-supporting + frameworks currently in existence use the value of ``PATH_INFO`` as the + request URI. + + The two variable's name and function originate in CGI + (:rfc:`3875`), which describes an environment wherein a script (or + any executable's) output could be passed on by the web server as + content. A typical CGI script resides somewhere on the filesystem + to which the request URI maps. As part of serving the request the + server traverses the URI mapping each element to an element of the + filesystem path to locate the script. Once the script is found, the + portion of the URI used thus far is assigned to the ``SCRIPT_NAME`` + variable, while the remainder of the URI gets assigned to + ``PATH_INFO``. 
+ + Because the relationship between Python modules and files on disk + is largely tangential, it is not very clear what exactly + ``PATH_INFO`` and ``SCRIPT_NAME`` ought to be. Even though Python + modules are most often files on disk located somewhere in the + Python path, they don't have to be (they could be code objects + constructed on-the-fly), and their location in the filesystem has + no relationship to the URL structure at all. + + The mismatch between CGI and WSGI results in an ambiguity which + requires that the split between the two variables be explicitely + specified, which is why ``mod_python.wsgi.base_uri`` exists. In essence + ``mod_python.wsgi.base_uri`` (or the path in surrounding + ````) is the ``SCRIPT_NAME`` portion of the URI and + defaults to ``''``. + + An important detail is that ``SCRIPT_NAME`` + ``PATH_INFO`` should + result in the original URI (encoding issues aside). Since + ``SCRIPT_NAME`` (in its original CGI definition) referrs to an + actual file, its name never ends with a slash. The slash, if any, + always ends up in ``PATH_INFO``. E.g. ``/path/to/myscrip/foo/bar`` + splits into ``/path/to/myscript`` and ``/foo/bar``. If the whole + site is served by an app or a script, then ``SCRIPT_NAME`` is a + blank string ``''``, not a ``'/'``. + + +.. _hand-psp: + +PSP Handler +=========== + +.. index:: + pair: PSP; handler + +PSP handler is a handler that processes documents using the +``PSP`` class in ``mod_python.psp`` module. + +To use it, simply add this to your httpd configuration:: + + AddHandler mod_python .psp + PythonHandler mod_python.psp + +For more details on the PSP syntax, see Section :ref:`pyapi-psp`. + +If ``PythonDebug`` server configuration is ``On``, then by +appending an underscore (``'_'``) to the end of the url you can get a +nice side-by-side listing of original PSP code and resulting Python +code generated by the ``psp} module``. This is very useful for +debugging. 
You'll need to adjust your httpd configuration::: + + AddHandler mod_python .psp .psp_ + PythonHandler mod_python.psp + PythonDebug On + +.. note:: + + Leaving debug on in a production environment will allow remote users + to display source code of your PSP pages! + +.. _hand-cgi: + +CGI Handler +=========== + +.. index:: + pair: CGI; handler + + +CGI handler is a handler that emulates the CGI environment under mod_python. + +Note that this is not a ``'true'`` CGI environment in that it is +emulated at the Python level. ``stdin`` and ``stdout`` are +provided by substituting ``sys.stdin`` and ``sys.stdout``, and +the environment is replaced by a dictionary. The implication is that +any outside programs called from within this environment via +``os.system``, etc. will not see the environment available to the +Python program, nor will they be able to read/write from standard +input/output with the results expected in a ``'true'`` CGI environment. + +The handler is provided as a stepping stone for the migration of +legacy code away from CGI. It is not recommended that you settle on +using this handler as the preferred way to use mod_python for the long +term. This is because the CGI environment was not intended for +execution within threads (e.g. requires changing of current directory +with is inherently not thread-safe, so to overcome this cgihandler +maintains a thread lock which forces it to process one request at a +time in a multi-threaded server) and therefore can only be implemented +in a way that defeats many of the advantages of using mod_python in +the first place. + +To use it, simply add this to your :file:`.htaccess` file::: + + SetHandler mod_python + PythonHandler mod_python.cgihandler + +As of version 2.7, the cgihandler will properly reload even indirectly +imported module. This is done by saving a list of loaded modules +(sys.modules) prior to executing a CGI script, and then comparing it +with a list of imported modules after the CGI script is done. 
Modules +(except for whose whose __file__ attribute points to the standard +Python library location) will be deleted from sys.modules thereby +forcing Python to load them again next time the CGI script imports +them. + +If you do not want the above behavior, edit the :file:`cgihandler.py` +file and comment out the code delimited by ###. + +Tests show the cgihandler leaking some memory when processing a lot of +file uploads. It is still not clear what causes this. The way to work +around this is to set the Apache ``MaxRequestsPerChild`` to a non-zero +value. + + diff --git a/Doc/installation.rst b/Doc/installation.rst new file mode 100644 index 0000000..8332c63 --- /dev/null +++ b/Doc/installation.rst @@ -0,0 +1,322 @@ +.. _installation: + +************ +Installation +************ + +.. note:: + + By far the best place to get help with installation and other issues + is the mod_python mailing list. Please take a moment to join the + mod_python mailing list by sending an e-mail with the word + "subscribe" in the subject to mod_python-request@modpython.org or visit the + `mod_python mailing list page `_ + + +.. _inst-prerequisites: + +Prerequisites +============= + +In the ideal case your Operating System provides a pre-packaged +version of mod_python. If not, you will need to compile it +yourself. This version of mod_python requires: + +* Python 2 (2.6 and up) or Python 3 (3.3 and up). +* Apache 2.2 or later. Apache 2.4 is highly recommended over 2.2. + +In order to compile mod_python you will need to have the include files +for both Apache and Python, as well as the Python library installed on +your system. If you installed Python and Apache from source, then you +already have everything needed. However, if you are using pre-packaged +software then you may need to install the "development" packages +which contain the include files and libraries necessary to compile +mod_python. Please check your OS documentation for specifics. 
(Hint: +look for packages named python-devel or python-dev and apache-devel or +apache-dev or httpd-dev, etc.). + +.. _inst-compiling: + +Compiling +========= + +.. _inst-configure: + +Running :file:`./configure` +--------------------------- + +The :file:`./configure` script will analyze your environment and +create custom Makefiles particular to your system. Aside from all the +standard autoconf stuff, :file:`./configure` does the following: + +.. index:: + single: apxs + pair: ./configure; --with-apxs + +* Finds out whether a program called :program:`apxs` is available. This + program is part of the standard Apache distribution, and is required + for compilation. + + You can manually specify the location of apxs by using the + :option:`with-apxs` option, e.g.:: + + $ ./configure --with-apxs=/usr/local/apache/bin/apxs + + It is recommended that you specify this option. + +.. index:: + single: libpython.a + pair: ./configure; --with-python + +* Checks your Python version and attempts to figure out where + :file:`libpython` is by looking at various parameters compiled into + your Python binary. By default, it will use the :program:`python` + program found in your :envvar:`PATH`. + + If the first Python binary in the path is not suitable or not the one + desired for mod_python, you can specify an alternative location with the + :option:`with-python` option, e.g.:: + + $ ./configure --with-python=/usr/local/bin/python2.3 + +.. index:: + pair: ./configure; --with-mutex-dir + +* Sets the directory for the apache mutex locks (if the mutex + mechanism chosen by APR requires one). + + Note: mutex locks are used only by :ref:`mod_python Sessions ` and + :ref:`PSP ` (which maintains a Session implicitly). If you're + not using mod_python Sessions or PSP, then this setting should not + matter. + + Default is :file:`/tmp`. The directory must exist and be + writable by the owner of the apache process. 
+ + Use :option:`with-mutex-dir` option, e.g:: + + $ ./configure --with-mutex-dir=/var/run/mod_python + + The mutex directory can also be specified at run time using + :ref:`dir-other-po` ``mod_python.mutex_directory``. + See :ref:`inst-apacheconfig`. + + *New in version 3.3.0* + +.. index:: + pair: ./configure; --with-max-locks + +* Sets the maximum number of mutex locks reserved by mod_python. + + Note: mutex locks are used only by :ref:`mod_python Sessions ` and + :ref:`PSP ` (which maintains a Session implicitly). If you're + not using mod_python Sessions or PSP, then this setting should not + matter. + + The mutexes used for locking are a limited resource on some + systems. Increasing the maximum number of locks may increase performance + when using session locking. The default is 8. A reasonable number for + higher performance would be 32. + Use :option:`with-max-locks` option, e.g:: + + $ ./configure --with-max-locks=32 + + The number of locks can also be specified at run time using + :ref:`dir-other-po` ``mod_python.mutex_locks``. + See :ref:`inst-apacheconfig`. + + *New in version 3.2.0* + +.. index:: + single: flex + pair: ./configure; --with-flex + +* Attempts to locate :program:`flex` and determine its version. + If :program:`flex` cannot be found in your :envvar:`PATH` :program:`configure` + will fail. If the wrong version is found :program:`configure` will generate a warning. + You can generally ignore this warning unless you need to re-create + :file:`src/psp_parser.c`. + + The parser used by psp (See :ref:`pyapi-psp`) is written in C + generated using :program:`flex`. (This requires a reentrant version + of :program:`flex`, 2.5.31 or later). + + If the first flex binary in the path is not suitable or not the one desired + you can specify an alternative location with the option:with-flex: + option, e.g:: + + $ ./configure --with-flex=/usr/local/bin/flex + + *New in version 3.2.0* + +.. _inst-make: + +Running :file:`make` +-------------------- + +.. 
index:: + single: make + +* To start the build process, simply run:: + + $ make + +.. _inst-installing: + +Installing +========== + +.. _inst-makeinstall: + +.. index:: + pair: make; install + +Running :file:`make install` + +* This part of the installation in most cases needs to be done as root:: + + $ sudo make install + + * This will copy the mod_python library (:file:`mod_python.so`) into your Apache + :file:`libexec` or :file:`modules` directory, where all the other modules are. + + * Lastly, it will install the Python libraries in + :file:`site-packages` and compile them. + +.. index:: + pair: make targets; install_py_lib + pair: make targets; install_dso + +.. note:: + + If you wish to selectively install just the Python libraries + or the DSO (mod_python.so) (which may not always require superuser + privileges), you can use the following :program:`make` targets: + :option:`install_py_lib` and :option:`install_dso`. + +.. _inst-apacheconfig: + +Configuring Apache +================== + +.. index:: + pair: LoadModule; apache configuration + single: mod_python.so + +* *LoadModule* + + You need to configure Apache to load the module by adding the + following line in the Apache configuration file, usually called + :file:`httpd.conf` or :file:`apache.conf`:: + + LoadModule python_module libexec/mod_python.so + + The actual path to :program:`mod_python.so` may vary, but :program:`make install` + should report at the very end exactly where :program:`mod_python.so` + was placed and how the ``LoadModule`` directive should appear. + +* See :ref:`inst-testing` below for more basic configuration parameters. + + +.. _inst-testing: + +Testing +======= + +#. Make a directory that would be visible on your web site, e.g. ``htdocs/test``. + +#. 
Add the following configuration directives to the main server config file:: + + <Directory /some/directory/htdocs/test> + AddHandler mod_python .py + PythonHandler mptest + PythonDebug On + </Directory> + + (Substitute ``/some/directory`` above for something applicable to + your system, usually your Apache ServerRoot) + + This configuration can also be specified in an :file:`.htaccess` + file. Note that :file:`.htaccess` configuration is typically + disabled by default, to enable it in a directory specify + ``AllowOverride`` with at least ``FileInfo``. + +#. This causes all requests for URLs ending in ``.py`` to be processed + by mod_python. Upon being handed a request, mod_python looks for + the appropriate *python handler* to handle it. Here, there is a + single ``PythonHandler`` directive defining module ``mptest`` as + the python handler to use. We'll see next how this python handler + is defined. + +#. At this time, if you made changes to the main configuration file, + you will need to restart Apache in order for the changes to take + effect. + +#. Edit :file:`mptest.py` file in the :file:`htdocs/test` directory so + that it has the following lines (be careful when cutting and + pasting from your browser, you may end up with incorrect + indentation and a syntax error):: + + from mod_python import apache + + def handler(req): + req.content_type = 'text/plain' + req.write("Hello World!") + return apache.OK + +#. Point your browser to the URL referring to the :file:`mptest.py`; + you should see ``'Hello World!'``. If you didn't - refer to the + troubleshooting section next. + +#. Note that according to the configuration written above, you can + point your browser to *any* URL ending in .py in the test + directory. Therefore pointing your browser to + :file:`/test/foobar.py` will be handled exactly the same way by + :file:`mptest.py`. This is because the code in the ``handler`` + function does not bother examining the URL and always acts the same + way no matter what the URL is. + +#. 
If everything worked well, move on to Chapter :ref:`tutorial`. + + +.. _inst-trouble: + +Troubleshooting +=============== + +There are a few things you can try to identify the problem: + +* Carefully study the error output, if any. + +* Check the server error log file, it may contain useful clues. + +* Try running Apache from the command line in single process mode:: + + ./httpd -X + + This prevents it from backgrounding itself and may provide some useful + information. + +* Beginning with mod_python 3.2.0, you can use the mod_python.testhandler + to diagnose your configuration. Add this to your :file:`httpd.conf` file:: + + <Location /mpinfo> + SetHandler mod_python + PythonHandler mod_python.testhandler + </Location> + + Now point your browser to the :file:`/mpinfo` URL + (e.g. :file:`http://localhost/mpinfo`) and note down the information given. + This will help you report your problem to the mod_python list. + +* Ask on the `mod_python list `_. + Please make sure to provide specifics such as: + + * mod_python version. + * Your operating system type, name and version. + * Your Python version, and any unusual compilation options. + * Your Apache version. + * Relevant parts of the Apache config, .htaccess. + * Relevant parts of the Python code. + + diff --git a/Doc/introduction.rst b/Doc/introduction.rst new file mode 100644 index 0000000..3788edd --- /dev/null +++ b/Doc/introduction.rst @@ -0,0 +1,65 @@ +.. _introduction: + +************ +Introduction +************ + +.. _performance: + +Performance +=========== + +One of the main advantages of mod_python is the increase in +performance over traditional CGI. Below are results of a very crude +test. The test was done on a 1.2GHz Pentium machine running Red Hat +Linux 7.3. `Ab `_ +was used to poll 4 kinds of scripts, all of which imported the +standard cgi module (because this is how a typical Python cgi script +begins), then output a single word ``'Hello!'``. 
The results are +based on 10000 requests with concurrency of 1:: + + Standard CGI: 23 requests/s + Mod_python cgihandler: 385 requests/s + Mod_python publisher: 476 requests/s + Mod_python handler: 1203 requests/s + + +.. _apache_api: + +Apache HTTP Server API +====================== + +Apache processes requests in *phases* (i.e. read the request, parse +headers, check access, etc.). Phases are implemented by +functions called *handlers*. Traditionally, handlers are written in C +and compiled into Apache modules. Mod_python provides a way to extend +Apache functionality by writing Apache handlers in Python. For a +detailed description of the Apache request processing process, see the +`Apache Developer Documentation `_, as well as the +`Mod_python - Integrating Python with Apache `_ +paper. + +Currently only a subset of the Apache HTTP Server API is accessible +via mod_python. It was never the goal of the project to provide a 100% +coverage of the API. Rather, mod_python is focused on the most useful +parts of the API and on providing the most "Pythonic" ways of using +it. + +.. _intro_other: + +Other Features +============== + +Mod_python also provides a number features that fall in the category +of web development, e.g. a parser for embedding Python in HTML +(:ref:`pyapi-psp`), a handler that maps URL space into modules and +functions (:ref:`hand-pub`), support for session (:ref:`pyapi-sess`) +and cookie (:ref:`pyapi-cookie`) handling. + +.. seealso:: + + `Apache HTTP Server Developer Documentation `_ + for HTTP developer information + + `Mod_python - Integrating Python with Apache `_ + for details on how mod_python interfaces with Apache HTTP Server diff --git a/Doc/license.rst b/Doc/license.rst new file mode 100644 index 0000000..9f48cf0 --- /dev/null +++ b/Doc/license.rst @@ -0,0 +1,298 @@ +.. highlightlang:: none + +.. 
_history-and-license: + +******************* +History and License +******************* + +History +======= + +Mod_python originates from a project called +`Httpdapy `_ (1997). For a +long time Httpdapy was not called mod_python because Httpdapy was not +meant to be Apache-specific. Httpdapy was designed to be +cross-platform and in fact was initially written for the Netscape +server (back then it was called Nsapy (1997)). + +Nsapy itself was based on an original concept and first code by Aaron +Watters from "Internet Programming with Python" by Aaron Watters, +Guido Van Rossum and James C. Ahlstrom, ISBN 1-55851-484-8. + +Without Aaron's inspiration, there would be no mod_python. Quoting +from the Httpdapy README file:: + + Although Nsapy only worked with Netscape servers, it was very + generic in its design and was based on some brilliant ideas that + weren't necessarily Netscape specific. Its design is a combination + of extensibility, simplicity and efficiency that takes advantage of + many of the key benefits of Python and is totally in the spirit of + Python. + +This excerpt from the Httpdapy README file describes well the +challenges and the solution provided by embedding Python within the +HTTP server:: + + While developing my first WWW applications a few years back, I + found that using CGI for programs that need to connect to + relational databases (commercial or not) is too slow because every + hit requires loading of the interpreter executable which can be + megabytes in size, any database libraries that can themselves be + pretty big, plus, the database connection/authentication process + carries a very significant overhead because it involves things like + DNS resolutions, encryption, memory allocation, etc.. Under + pressure to speed up the application, I nearly gave up the idea of + using Python for the project and started researching other tools + that claimed to specialize in www database integration. 
I did not + have any faith in MS's ASP; was quite frustrated by Netscape + LiveWire's slow performance and bugginess; Cold Fusion seemed + promising, but I soon learned that writing in html-like tags makes + programs as readable as assembly. Same is true for PHP. Besides, I + *really* wanted to write things in Python. + + Around the same time the Internet Programming With Python book came + out and the chapter describing how to embed Python within Netscape + server immediately caught my attention. I used the example in my + project, and developed an improved version of what I later called + Nsapy that compiled on both Windows NT and Solaris. + + Although Nsapy only worked with Netscape servers, it was a very + intelligent generic OO design that, in the spirit of Python, + lent itself for easy portability to other web servers. + + Incidentally, the popularity of Netscape's servers was taking a turn + south, and so I set out to port Nsapy to other servers starting + with the most popular one, Apache. And so from Nsapy was born + Httpdapy. + + +...continuing this saga, yours truly later learned that writing +Httpdapy for every server is a task a little bigger and less +interesting than I originally imagined. + +Instead, it seemed like providing a Python counterpart to the popular +Perl Apache extension mod_perl that would give Python users the same +(or better) capability would be a much more exciting thing to do. + +And so it was done. The first release of mod_python happened in May of +2000. + +License +======= + +Licensed under the Apache License, Version 2.0 (the "License"); you +may not use this file except in compliance with the License. You may +obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. 
See the License for the specific language governing +permissions and limitations under the License. + +Apache License:: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Doc/pythonapi.rst b/Doc/pythonapi.rst new file mode 100644 index 0000000..61d78f2 --- /dev/null +++ b/Doc/pythonapi.rst @@ -0,0 +1,3362 @@ + +.. _pythonapi: + +********** +Python API +********** + +.. _pyapi-interps: + +Multiple Interpreters +===================== + +When working with mod_python, it is important to be aware of a feature +of Python that is normally not used when using the language for +writing scripts to be run from command line. (In fact, this feature is not +available from within Python itself and can only be accessed through +the `C language API `_.) +Python C API provides the ability to create :dfn:`subinterpreters`. A +more detailed description of a subinterpreter is given in the +documentation for the +`Py_NewInterpreter() `_ +function. 
For this discussion, it will suffice to say that each +subinterpreter has its own separate namespace, not accessible from +other subinterpreters. Subinterpreters are very useful to make sure +that separate programs running under the same Apache server do not +interfere with one another. + +.. index:: + single: main_interpreter + +At server start-up or mod_python initialization time, mod_python +initializes the *main interpreter*. The main interpreter contains a +dictionary of subinterpreters. Initially, this dictionary is +empty. With every request, as needed, subinterpreters are created, and +references to them are stored in this dictionary. The dictionary is +keyed on a string, also known as *interpreter name*. This name can be +any string. The main interpreter is named ``'main_interpreter'``. +The way all other interpreters are named can be controlled by +``PythonInterp*`` directives. Default behavior is to name +interpreters using the Apache virtual server name (``ServerName`` +directive). This means that all scripts in the same virtual server +execute in the same subinterpreter, but scripts in different virtual +servers execute in different subinterpreters with completely separate +namespaces. :ref:`dir-other-ipd` and :ref:`dir-other-ipdv` directives +alter the naming convention to use the absolute path of the directory +being accessed, or the directory in which the ``Python*Handler`` was +encountered, respectively. :ref:`dir-other-pi` can be used to force +the interpreter name to a specific string overriding any naming +conventions. + +Once created, a subinterpreter will be reused for subsequent requests. +It is never destroyed and exists until the Apache process ends. + +You can find out the name of the interpreter under which you're +running by peeking at :attr:`request.interpreter`. + +.. 
note:: + + If any module is being used which has a C code component that uses + the simplified API for access to the Global Interpreter Lock (GIL) + for Python extension modules, then the interpreter name must be + forcibly set to be ``'main_interpreter'``. This is necessary as such + a module will only work correctly if run within the context of the + first Python interpreter created by the process. If not forced to + run under the ``'main_interpreter'``, a range of Python errors can + arise, each typically referring to code being run in *restricted + mode*. + +.. seealso:: + + ``_ + Python C Language API + ``_ + PEP 0311 - Simplified Global Interpreter Lock Acquisition for Extensions + +.. _pyapi-handler: + +Overview of a Request Handler +============================= + +.. index:: + pair: request; handler + +A :dfn:`handler` is a function that processes a particular phase of a +request. Apache processes requests in phases - read the request, +process headers, provide content, etc. For every phase, it will call +handlers, provided by either the Apache core or one of its modules, +such as mod_python which passes control to functions provided by the +user and written in Python. A handler written in Python is not any +different from a handler written in C, and follows these rules: + +.. index:: + single: req + pair: request; object + +A handler function will always be passed a reference to a request +object. (Throughout this manual, the request object is often referred +to by the ``req`` variable.) + +Every handler can return: + +* :const:`apache.OK`, meaning this phase of the request was handled by this + handler and no errors occurred. + +* :const:`apache.DECLINED`, meaning this handler has not handled this + phase of the request to completion and Apache needs to look for + another handler in subsequent modules. + +* :const:`apache.HTTP_ERROR`, meaning an HTTP error occurred. 
+ *HTTP_ERROR* can be any of the following:: + + HTTP_CONTINUE = 100 + HTTP_SWITCHING_PROTOCOLS = 101 + HTTP_PROCESSING = 102 + HTTP_OK = 200 + HTTP_CREATED = 201 + HTTP_ACCEPTED = 202 + HTTP_NON_AUTHORITATIVE = 203 + HTTP_NO_CONTENT = 204 + HTTP_RESET_CONTENT = 205 + HTTP_PARTIAL_CONTENT = 206 + HTTP_MULTI_STATUS = 207 + HTTP_MULTIPLE_CHOICES = 300 + HTTP_MOVED_PERMANENTLY = 301 + HTTP_MOVED_TEMPORARILY = 302 + HTTP_SEE_OTHER = 303 + HTTP_NOT_MODIFIED = 304 + HTTP_USE_PROXY = 305 + HTTP_TEMPORARY_REDIRECT = 307 + HTTP_BAD_REQUEST = 400 + HTTP_UNAUTHORIZED = 401 + HTTP_PAYMENT_REQUIRED = 402 + HTTP_FORBIDDEN = 403 + HTTP_NOT_FOUND = 404 + HTTP_METHOD_NOT_ALLOWED = 405 + HTTP_NOT_ACCEPTABLE = 406 + HTTP_PROXY_AUTHENTICATION_REQUIRED= 407 + HTTP_REQUEST_TIME_OUT = 408 + HTTP_CONFLICT = 409 + HTTP_GONE = 410 + HTTP_LENGTH_REQUIRED = 411 + HTTP_PRECONDITION_FAILED = 412 + HTTP_REQUEST_ENTITY_TOO_LARGE = 413 + HTTP_REQUEST_URI_TOO_LARGE = 414 + HTTP_UNSUPPORTED_MEDIA_TYPE = 415 + HTTP_RANGE_NOT_SATISFIABLE = 416 + HTTP_EXPECTATION_FAILED = 417 + HTTP_UNPROCESSABLE_ENTITY = 422 + HTTP_LOCKED = 423 + HTTP_FAILED_DEPENDENCY = 424 + HTTP_INTERNAL_SERVER_ERROR = 500 + HTTP_NOT_IMPLEMENTED = 501 + HTTP_BAD_GATEWAY = 502 + HTTP_SERVICE_UNAVAILABLE = 503 + HTTP_GATEWAY_TIME_OUT = 504 + HTTP_VERSION_NOT_SUPPORTED = 505 + HTTP_VARIANT_ALSO_VARIES = 506 + HTTP_INSUFFICIENT_STORAGE = 507 + HTTP_NOT_EXTENDED = 510 + +As an alternative to *returning* an HTTP error code, handlers can +signal an error by *raising* the :const:`apache.SERVER_RETURN` +exception, and providing an HTTP error code as the exception value, +e.g.:: + + raise apache.SERVER_RETURN, apache.HTTP_FORBIDDEN + +Handlers can send content to the client using the :meth:`request.write()` +method. + +Client data, such as POST requests, can be read by using the +:meth:`request.read()` function. 
+ +An example of a minimalistic handler might be:: + + from mod_python import apache + + def requesthandler(req): + req.content_type = "text/plain" + req.write("Hello World!") + return apache.OK + +.. _pyapi-filter: + +Overview of a Filter Handler +============================ + +.. index:: + pair: filter; handler + +A :dfn:`filter handler` is a function that can alter the input or the +output of the server. There are two kinds of filters - :dfn:`input` and +:dfn:`output` that apply to input from the client and output to the +client respectively. + +At this time mod_python supports only request-level filters, meaning +that only the body of HTTP request or response can be filtered. Apache +provides support for connection-level filters, which will be supported +in the future. + +A filter handler receives a *filter* object as its argument. The +request object is available as well via ``filter.req``, but all +writing and reading should be done via the filter's object read and +write methods. + +Filters need to be closed when a read operation returns None +(indicating End-Of-Stream). + +The return value of a filter is ignored. Filters cannot decline +processing like handlers, but the same effect can be achieved +by using the :meth:`filter.pass_on()` method. + +Filters must first be registered using ``PythonInputFilter`` or +``PythonOutputFilter``, then added using the Apache +``Add/SetInputFilter`` or ``Add/SetOutputFilter`` directives. 
+ +Here is an example of how to specify an output filter; it tells the +server that all .py files should be processed by the CAPITALIZE filter:: + + PythonOutputFilter capitalize CAPITALIZE + AddOutputFilter CAPITALIZE .py + +And here is what the code for the :file:`capitalize.py` might look +like:: + + from mod_python import apache + + def outputfilter(filter): + + s = filter.read() + while s: + filter.write(s.upper()) + s = filter.read() + + if s is None: + filter.close() + +When writing filters, keep in mind that a filter will be called any +time anything upstream requests an IO operation, and the filter has no +control over the amount of data passed through it and no notion of +where in the request processing it is called. For example, within a +single request, a filter may be called once or five times, and there +is no way for the filter to know beforehand that the request is over +and which of the calls is last or first for this request, though +encounter of an EOS (None returned from a read operation) is a fairly +strong indication of an end of a request. + +Also note that filters may end up being called recursively in +subrequests. To avoid the data being altered more than once, always +make sure you are not in a subrequest by examining the :attr:`request.main` +value. + +For more information on filters, see ``_. + +.. _pyapi-conn: + +Overview of a Connection Handler +================================ + +.. index:: + pair: connection; handler + +A :dfn:`connection handler` handles the connection, starting almost +immediately from the point the TCP connection to the server was +made. + +Unlike HTTP handlers, connection handlers receive a *connection* +object as an argument. + +Connection handlers can be used to implement protocols. 
Here is an +example of a simple echo server: + +Apache configuration:: + + PythonConnectionHandler echo + +Contents of :file:`echo.py` file:: + + from mod_python import apache + + def connectionhandler(conn): + + while 1: + conn.write(conn.readline()) + + return apache.OK + +:mod:`apache` -- Access to Apache Internals. +=============================================== + +.. module:: apache + :synopsis: Access to Apache Internals. +.. moduleauthor:: Gregory Trubetskoy grisha@modpython.org + +The Python interface to Apache internals is contained in a module +appropriately named :mod:`apache`, located inside the +:mod:`mod_python` package. This module provides some important +objects that map to Apache internal structures, as well as some useful +functions, all documented below. (The request object also provides an +interface to Apache internals, it is covered in its own section of +this manual.) + +.. index:: + pair: _apache; module + +The :mod:`apache` module can only be imported by a script running +under mod_python. This is because it depends on a built-in module +:mod:`_apache` provided by mod_python. + +It is best imported like this:: + + from mod_python import apache + +:mod:`mod_python.apache` module defines the following functions and +objects. For a more in-depth look at Apache internals, see the +`Apache Developer Page `_ + +.. _pyapi-apmeth: + +Functions +--------- + +.. function:: log_error(message[, level[, server]]) + + An interface to the Apache ``ap_log_error()`` + function. *message* is a string with the error message, + *level* is one of the following flags constants:: + + APLOG_EMERG + APLOG_ALERT + APLOG_CRIT + APLOG_ERR + APLOG_WARNING + APLOG_NOTICE + APLOG_INFO + APLOG_DEBUG + APLOG_NOERRNO // DEPRECATED + + *server* is a reference to a :meth:`request.server` object. If + *server* is not specified, then the error will be logged to the + default error log, otherwise it will be written to the error log for + the appropriate virtual server. 
When *server* is not specified, + the setting of LogLevel does not apply, the LogLevel is dictated by + an httpd compile-time default, usually ``warn``. + + If you have a reference to a request object available, consider using + :meth:`request.log_error` instead, it will prepend request-specific + information such as the source IP of the request to the log entry. + +.. function:: import_module(module_name[, autoreload=1, log=0, path=None]) + + This function can be used to import modules taking advantage of + mod_python's internal mechanism which reloads modules automatically + if they have changed since last import. + + *module_name* is a string containing the module name (it can + contain dots, e.g. ``mypackage.mymodule``); *autoreload* indicates + whether the module should be reloaded if it has changed since last + import; when *log* is true, a message will be written to the + logs when a module is reloaded; *path* allows restricting + modules to specific paths. + + Example:: + + from mod_python import apache + module = apache.import_module('module_name', log=1) + +.. function:: allow_methods([*args]) + + A convenience function to set values in :meth:`request.allowed`. + :meth:`request.allowed` is a bitmask that is used to construct the + ``'Allow:'`` header. It should be set before returning a + :const:`HTTP_NOT_IMPLEMENTED` error. + + Arguments can be one or more of the following:: + + M_GET + M_PUT + M_POST + M_DELETE + M_CONNECT + M_OPTIONS + M_TRACE + M_PATCH + M_PROPFIND + M_PROPPATCH + M_MKCOL + M_COPY + M_MOVE + M_LOCK + M_UNLOCK + M_VERSION_CONTROL + M_CHECKOUT + M_UNCHECKOUT + M_CHECKIN + M_UPDATE + M_LABEL + M_REPORT + M_MKWORKSPACE + M_MKACTIVITY + M_BASELINE_CONTROL + M_MERGE + M_INVALID + +.. function:: exists_config(name) + + This function returns True if the Apache server was launched with + the definition with the given *name*. 
This means that you can + test whether Apache was launched with the ``-DFOOBAR`` parameter + by calling ``apache.exists_config_define('FOOBAR')``. + +.. function:: stat(fname, wanted) + + This function returns an instance of an ``mp_finfo`` object + describing information related to the file with name ``fname``. + The ``wanted`` argument describes the minimum attributes which + should be filled out. The resultant object can be assigned to the + :attr:`request.finfo` attribute. + +.. function:: register_cleanup(callable[, data]) + + Registers a cleanup that will be performed at child shutdown + time. Equivalent to :func:`server.register_cleanup`, except + that a request object is not required. *Warning:* do not pass + directly or indirectly a request object in the data + parameter. Since the callable will be called at server shutdown + time, the request object won't exist anymore and any manipulation + of it in the handler will give undefined behaviour. + +.. function:: config_tree() + + Returns the server-level configuration tree. This tree does not + include directives from .htaccess files. This is a *copy* of the + tree, modifying it has no effect on the actual configuration. + +.. function:: server_root() + + Returns the value of ServerRoot. + +.. function:: make_table() + + This function is obsolete and is an alias to :class:`table` (see below). + +.. function:: mpm_query(code) + + Allows querying of the MPM for various parameters such as numbers of + processes and threads. The return value is one of three constants:: + + AP_MPMQ_NOT_SUPPORTED = 0 # This value specifies whether + # an MPM is capable of + # threading or forking. + AP_MPMQ_STATIC = 1 # This value specifies whether + # an MPM is using a static # of + # threads or daemons. + AP_MPMQ_DYNAMIC = 2 # This value specifies whether + # an MPM is using a dynamic # of + # threads or daemons. 
+ + The *code* argument must be one of the following:: + + AP_MPMQ_MAX_DAEMON_USED = 1 # Max # of daemons used so far + AP_MPMQ_IS_THREADED = 2 # MPM can do threading + AP_MPMQ_IS_FORKED = 3 # MPM can do forking + AP_MPMQ_HARD_LIMIT_DAEMONS = 4 # The compiled max # daemons + AP_MPMQ_HARD_LIMIT_THREADS = 5 # The compiled max # threads + AP_MPMQ_MAX_THREADS = 6 # # of threads/child by config + AP_MPMQ_MIN_SPARE_DAEMONS = 7 # Min # of spare daemons + AP_MPMQ_MIN_SPARE_THREADS = 8 # Min # of spare threads + AP_MPMQ_MAX_SPARE_DAEMONS = 9 # Max # of spare daemons + AP_MPMQ_MAX_SPARE_THREADS = 10 # Max # of spare threads + AP_MPMQ_MAX_REQUESTS_DAEMON= 11 # Max # of requests per daemon + AP_MPMQ_MAX_DAEMONS = 12 # Max # of daemons by config + + Example:: + + if apache.mpm_query(apache.AP_MPMQ_IS_THREADED): + # do something + else: + # do something else + +.. _pyapi-apmem: + +Attributes +---------- + +.. attribute:: interpreter + + String. The name of the subinterpreter under which we're running. + *(Read-Only)* + +.. attribute:: main_server + + A ``server`` object for the main server. + *(Read-Only)* + +.. attribute:: MODULE_MAGIC_NUMBER_MAJOR + + Integer. An internal to Apache version number useful to determine whether + certain features should be available. See :attr:`MODULE_MAGIC_NUMBER_MINOR`. + + Major API changes that could cause compatibility problems for older + modules such as structure size changes. No binary compatibility is + possible across a change in the major version. + + *(Read-Only)* + + +.. attribute:: MODULE_MAGIC_NUMBER_MINOR + + Integer. An internal to Apache version number useful to determine whether + certain features should be available. See :attr:`MODULE_MAGIC_NUMBER_MAJOR`. + + Minor API changes that do not cause binary compatibility problems. + + *(Read-Only)* + + +.. _pyapi-mptable: + +Table Object (mp_table) +----------------------- +.. index:: + singe: table + +.. 
class:: table([mapping-or-sequence]) + + Returns a new empty object of type ``mp_table``. See Section + :ref:`pyapi-mptable` for description of the table object. The + *mapping-or-sequence* will be used to provide initial values for + the table. + + The table object is a wrapper around the Apache APR table. The + table object behaves very much like a dictionary (including the + Python 2.2 features such as support of the ``in`` operator, etc.), + with the following differences: + + * Both keys and values must be strings. + * Key lookups are case-insensitive. + * Duplicate keys are allowed (see :meth:`table.add()` below). When there is + more than one value for a key, a subscript operation returns a list. + + Much of the information that Apache uses is stored in tables. For + example, :meth:`request.headers_in` and :meth:`request.headers_out`. + + All the tables that mod_python provides inside the request object + are actual mappings to the Apache structures, so changing the + Python table also changes the underlying Apache table. + + In addition to normal dictionary-like behavior, the table object + also has the following method: + + .. method:: add(key, val) + + Allows for creating duplicate keys, which is useful + when multiple headers, such as `Set-Cookie:` are required. + +.. _pyapi-mprequest: + +Request Object +-------------- + +.. index:: + single: req + single: request + single: request_rec + +The request object is a Python mapping to the Apache `request_rec` +structure. When a handler is invoked, it is always passed a single +argument - the request object. For brevity, we often refer to it here +and throughout the code as ``req``. + +You can dynamically assign attributes to it as a way to communicate +between handlers. + +.. _pyapi-mprequest-meth: + +Request Methods +^^^^^^^^^^^^^^^ + +.. 
method:: request.add_cgi_vars() + + Calls Apache function ``ap_add_common_vars()`` followed some code + very similar to Apache ``ap_add_cgi_vars()`` with the exception of + calculating ``PATH_TRANSLATED`` value, thereby avoiding + sub-requests and filesystem access used in the ``ap_add_cgi_vars()`` + implementation. + +.. method:: request.add_common_vars() + + Use of this method is discouraged, use + :meth:`request.add_cgi_vars()` instead. + + Calls the Apache ``ap_add_common_vars()`` function. After a call to + this method, :attr:`request.subprocess_env` will contain *some* CGI + information. + +.. method:: request.add_handler(htype, handler[, dir]) + + Allows dynamic handler registration. *htype* is a string + containing the name of any of the apache request (but not filter or + connection) handler directives, + e.g. ``'PythonHandler'``. *handler* is a string containing the + name of the module and the handler function. Optional *dir* is + a string containing the name of the directory to be added to the + pythonpath. If no directory is specified, then, if there is already + a handler of the same type specified, its directory is inherited, + otherwise the directory of the presently executing handler is + used. If there is a ``'PythonPath'`` directive in effect, then + ``sys.path`` will be set exactly according to it (no directories + added, the *dir* argument is ignored). + + A handler added this way only persists throughout the life of the + request. It is possible to register more handlers while inside the + handler of the same type. One has to be careful as to not to create + an infinite loop this way. + + Dynamic handler registration is a useful technique that allows the + code to dynamically decide what will happen next. 
A typical example + might be a ``PythonAuthenHandler`` that will assign different + ``PythonHandlers`` based on the authorization level, something + like:: + + if manager: + req.add_handler("PythonHandler", "menu::admin") + else: + req.add_handler("PythonHandler", "menu::basic") + + .. note:: + + If you pass this function an invalid handler, an exception will be + generated at the time an attempt is made to find the handler. + + +.. method:: request.add_input_filter(filter_name) + + Adds the named filter into the input filter chain for the current + request. The filter should be added before the first attempt to + read any data from the request. + + +.. method:: request.add_output_filter(filter_name) + + Adds the named filter into the output filter chain for the current + request. The filter should be added before the first attempt to + write any data for the response. + + Provided that all data written is being buffered and not flushed, + this could be used to add the "CONTENT_LENGTH" filter into the + chain of output filters. The purpose of the "CONTENT_LENGTH" filter + is to add a ``Content-Length:`` header to the response.:: + + + req.add_output_filter("CONTENT_LENGTH") + req.write("content",0) + +.. method:: request.allow_methods(methods[, reset]) + + Adds methods to the :meth:`request.allowed_methods` list. This list + will be passed in `Allowed:` header if + :const:`HTTP_METHOD_NOT_ALLOWED` or :const:`HTTP_NOT_IMPLEMENTED` + is returned to the client. Note that Apache doesn't do anything to + restrict the methods, this list is only used to construct the + header. The actual method-restricting logic has to be provided in + the handler code. + + *methods* is a sequence of strings. If *reset* is 1, then + the list of methods is first cleared. + + +.. method:: request.auth_name() + + Returns AuthName setting. + + +.. method:: request.auth_type() + + Returns AuthType setting. + + +.. 
method:: request.construct_url(uri) + + This function returns a fully qualified URI string from the path + specified by uri, using the information stored in the request to + determine the scheme, server name and port. The port number is not + included in the string if it is the same as the default port 80. + + For example, imagine that the current request is directed to the + virtual server www.modpython.org at port 80. Then supplying + ``'/index.html'`` will yield the string + ``'http://www.modpython.org/index.html'``. + + +.. method:: request.discard_request_body() + + Tests for and reads any message body in the request, simply discarding + whatever it receives. + + +.. method:: request.document_root() + + Returns DocumentRoot setting. + + +.. method:: request.get_basic_auth_pw() + + Returns a string containing the password when Basic authentication is + used. + + On Python 3 the string will be decoded to Unicode using Latin1. + +.. method:: request.get_config() + + Returns a reference to the table object containing the mod_python + configuration in effect for this request except for + ``Python*Handler`` and ``PythonOption`` (The latter can be obtained + via :meth:`request.get_options()`. The table has directives as keys, + and their values, if any, as values. + + +.. method:: request.get_remote_host([type[, str_is_ip]]) + + This method is used to determine remote client's DNS name or IP + number. The first call to this function may entail a DNS look up, + but subsequent calls will use the cached result from the first + call. + + The optional *type* argument can specify the following: + + * :const:`apache.REMOTE_HOST` Look up the DNS name. Return None if Apache + directive ``HostNameLookups`` is ``Off`` or the hostname cannot + be determined. + + * :const:`apache.REMOTE_NAME` *(Default)* Return the DNS name if + possible, or the IP (as a string in dotted decimal notation) + otherwise. 
+ + * :const:`apache.REMOTE_NOLOOKUP` Don't perform a DNS lookup, return an + IP. Note: if a lookup was performed prior to this call, then the + cached host name is returned. + + * :const:`apache.REMOTE_DOUBLE_REV` Force a double-reverse lookup. On + failure, return None. + + If *str_is_ip* is ``None`` or unspecified, then the return + value is a string representing the DNS name or IP address. + + If the optional *str_is_ip* argument is not ``None``, then + the return value is an ``(address, str_is_ip)`` tuple, where + ``str_is_ip`` is non-zero if ``address`` is an IP address + string. + + On failure, ``None`` is returned. + + +.. method:: request.get_options() + + Returns a reference to the table object containing the options set by + the ``PythonOption`` directives. + + +.. method:: request.internal_redirect(new_uri) + + Internally redirects the request to the *new_uri*. *new_uri* + must be a string. + + The httpd server handles internal redirection by creating a new + request object and processing all request phases. Within an + internal redirect, :meth:`request.prev` will contain a reference to a + request object from which it was redirected. + + +.. method:: request.is_https() + + Returns non-zero if the connection is using SSL/TLS. Will always return + zero if the mod_ssl Apache module is not loaded. + + You can use this method during any request phase, unlike looking + for the ``HTTPS`` variable in the :attr:`request.subprocess_env` member + dictionary. This makes it possible to write an authentication or + access handler that makes decisions based upon whether SSL is being + used. + + Note that this method will not determine the quality of the + encryption being used. For that you should call the + `ssl_var_lookup` method to get one of the `SSL_CIPHER*` variables. + + +.. method:: request.log_error(message[, level]) + + An interface to the Apache `ap_log_rerror` function. 
*message* is a + string with the error message, *level* is one of the following + flags constants:: + + + APLOG_EMERG + APLOG_ALERT + APLOG_CRIT + APLOG_ERR + APLOG_WARNING + APLOG_NOTICE + APLOG_INFO + APLOG_DEBUG + APLOG_NOERRNO + + If you need to write to log and do not have a reference to a request object, + use the :func:`apache.log_error` function. + + +.. method:: request.meets_conditions() + + Calls the Apache ``ap_meets_conditions()`` function which returns a + status code. If *status* is :const:`apache.OK`, generate the + content of the response normally. If not, simply return *status*. + Note that *mtime* (and possibly the ETag header) should be set as + appropriate prior to calling this function. The same goes for + :meth:`request.status` if the status differs from :const:`apache.OK`. + + Example:: + + # ... + r.headers_out['ETag'] = '"1130794f-3774-4584-a4ea-0ab19e684268"' + r.headers_out['Expires'] = 'Mon, 18 Apr 2005 17:30:00 GMT' + r.update_mtime(1000000000) + r.set_last_modified() + + status = r.meets_conditions() + if status != apache.OK: + return status + + # ... do expensive generation of the response content ... + + +.. method:: request.requires() + + Returns a tuple of strings of arguments to ``require`` directive. + + For example, with the following apache configuration:: + + AuthType Basic + require user joe + require valid-user + + :meth:`request.requires()` would return ``('user joe', 'valid-user')``. + + +.. method:: request.read([len]) + + Reads at most *len* bytes directly from the client, returning a + string with the data read. If the *len* argument is negative or + omitted, reads all data given by the client. + + This function is affected by the ``Timeout`` Apache + configuration directive. The read will be aborted and an + :exc:`IOError` raised if the :exc:`Timeout` is reached while + reading client data. + + This function relies on the client providing the ``Content-length`` + header. 
Absence of the ``Content-length`` header will be treated as + if ``Content-length: 0`` was supplied. + + Incorrect ``Content-length`` may cause the function to try to read + more data than available, which will make the function block until + a ``Timeout`` is reached. + + On Python 3 the output is always bytes. + +.. method:: request.readline([len]) + + Like :meth:`request.read()` but reads until end of line. + + .. note:: + + In accordance with the HTTP specification, most clients will be + terminating lines with ``'\r\n'`` rather than simply + ``'\n'``. + + +.. method:: request.readlines([sizehint]) + + Reads all lines using :meth:`request.readline()` and returns a list of + the lines read. If the optional *sizehint* parameter is given in, + the method will read at least *sizehint* bytes of data, up to the + completion of the line in which the *sizehint* bytes limit is + reached. + + +.. method:: request.register_cleanup(callable[, data]) + + Registers a cleanup. Argument *callable* can be any callable + object, the optional argument *data* can be any object (default is + ``None``). At the very end of the request, just before the actual + request record is destroyed by Apache, *callable* will be + called with one argument, *data*. + + It is OK to pass the request object as data, but keep in mind that + when the cleanup is executed, the request processing is already + complete, so doing things like writing to the client is completely + pointless. + + If errors are encountered during cleanup processing, they should be + in error log, but otherwise will not affect request processing in + any way, which makes cleanup bugs sometimes hard to spot. + + If the server is shut down before the cleanup had a chance to run, + it's possible that it will not be executed. + + +.. method:: request.register_input_filter(filter_name, filter[, dir]) + + Allows dynamic registration of mod_python input + filters. 
*filter_name* is a string which would then subsequently be + used to identify the filter. *filter* is a string containing + the name of the module and the filter function. Optional *dir* + is a string containing the name of the directory to be added to the + pythonpath. If there is a ``PythonPath`` directive in effect, + then ``sys.path`` will be set exactly according to it (no + directories added, the *dir* argument is ignored). + + The registration of the filter this way only persists for the life + of the request. To actually add the filter into the chain of input + filters for the current request ``request.add_input_filter()`` would be + used. + + +.. method:: request.register_output_filter(filter_name, filter[, dir]) + + Allows dynamic registration of mod_python output + filters. *filter_name* is a string which would then subsequently be + used to identify the filter. *filter* is a string containing the + name of the module and the filter function. Optional *dir* is a + string containing the name of the directory to be added to the + pythonpath. If there is a ``PythonPath`` directive in effect, then + ``sys.path`` will be set exactly according to it (no directories + added, the *dir* argument is ignored). + + The registration of the filter this way only persists for the life + of the request. To actually add the filter into the chain of output + filters for the current request :meth:`request.add_output_filter()` + would be used. + + +.. method:: request.sendfile(path[, offset, len]) + + Sends *len* bytes of file *path* directly to the client, starting + at offset *offset* using the server's internal API. *offset* + defaults to 0, and *len* defaults to -1 (send the entire file). + + Returns the number of bytes sent, or raises an IOError exception on + failure. + + This function provides the most efficient way to send a file to the + client. + + +.. method:: request.set_etag() + + Sets the outgoing ``'ETag'`` header. + + +.. 
method:: request.set_last_modified() + + Sets the outgoing ``Last-Modified`` header based on value of + ``mtime`` attribute. + + +.. method:: request.ssl_var_lookup(var_name) + + Looks up the value of the named SSL variable. This method queries + the mod_ssl Apache module directly, and may therefore be used in + early request phases (unlike using the :attr:`request.subprocess_env` + member. + + If the mod_ssl Apache module is not loaded or the variable is not + found then ``None`` is returned. + + If you just want to know if a SSL or TLS connection is being used, + you may consider calling the ``is_https`` method instead. + + It is unfortunately not possible to get a list of all available + variables with the current mod_ssl implementation, so you must know + the name of the variable you want. Some of the potentially useful + ssl variables are listed below. For a complete list of variables + and a description of their values see the mod_ssl documentation.:: + + + SSL_CIPHER + SSL_CLIENT_CERT + SSL_CLIENT_VERIFY + SSL_PROTOCOL + SSL_SESSION_ID + + .. note:: + + Not all SSL variables are defined or have useful values in every + request phase. Also use caution when relying on these values + for security purposes, as SSL or TLS protocol parameters can + often be renegotiated at any time during a request. + + +.. method:: request.update_mtime(dependency_mtime) + + If *ependency_mtime* is later than the value in the ``mtime`` + attribute, sets the attribute to the new value. + + +.. method:: request.write(string[, flush=1]) + + Writes *string* directly to the client, then flushes the buffer, + unless flush is 0. Unicode strings are encoded using ``utf-8`` + encoding. + + +.. method:: request.flush() + + Flushes the output buffer. + + +.. method:: request.set_content_length(len) + + Sets the value of :attr:`request.clength` and the ``'Content-Length'`` + header to len. 
Note that after the headers have been sent out + (which happens just before the first byte of the body is written, + i.e. first call to :meth:`request.write`), calling the method is + meaningless. + +.. _pyapi-mprequest-mem: + +Request Members +^^^^^^^^^^^^^^^ + +.. attribute:: request.connection + + A ``connection`` object associated with this request. See + :ref:`pyapi-mpconn` Object for more details. + *(Read-Only)* + + +.. attribute:: request.server + + A server object associated with this request. See + :ref:`pyapi-mpserver` for more details. + *(Read-Only)* + + +.. attribute:: request.next + + If this is an internal redirect, the request object we redirect to. + *(Read-Only)* + + +.. attribute:: request.prev + + If this is an internal redirect, the request object we redirect from. + *(Read-Only)* + + +.. attribute:: request.main + + If this is a sub-request, pointer to the main request. + *(Read-Only)* + + +.. attribute:: request.the_request + + String containing the first line of the request. + *(Read-Only)* + + +.. attribute:: request.assbackwards + + Indicates an HTTP/0.9 "simple" request. This means that the + response will contain no headers, only the body. Although this + exists for backwards compatibility with obsolescent browsers, some + people have figured out that setting assbackwards to 1 can be a + useful technique when including part of the response from an + internal redirect to avoid headers being sent. + + +.. attribute:: request.proxyreq + + A proxy request: one of :const:`apache.PROXYREQ_*` values. + + +.. attribute:: request.header_only + + A boolean value indicating HEAD request, as opposed to GET. + *(Read-Only)* + + +.. attribute:: request.protocol + + Protocol, as given by the client, or ``'HTTP/0.9'``. Same as CGI :envvar:`SERVER_PROTOCOL`. + *(Read-Only)* + + +.. attribute:: request.proto_num + + Integer. Number version of protocol; 1.1 = 1001 *(Read-Only)* + + +.. attribute:: request.hostname + + String. 
Host, as set by full URI or Host: header. *(Read-Only)* + + +.. attribute:: request.request_time + + A long integer. When request started. *(Read-Only)* + + +.. attribute:: request.status_line + + Status line. E.g. ``'200 OK'``. *(Read-Only)* + + +.. attribute:: request.status + + Status. One of :const:`apache.HTTP_*` values. + + +.. attribute:: request.method + + A string containing the method - ``'GET'``, ``'HEAD'``, ``'POST'``, etc. Same + as CGI :envvar:`REQUEST_METHOD`. *(Read-Only)* + + +.. attribute:: request.method_number + + Integer containing the method number. *(Read-Only)* + + +.. attribute:: request.allowed + + Integer. A bitvector of the allowed methods. Used to construct the + Allowed: header when responding with + :const:`HTTP_METHOD_NOT_ALLOWED` or + :const:`HTTP_NOT_IMPLEMENTED`. This field is for Apache's + internal use, to set the ``Allowed:`` methods use + :meth:`request.allow_methods` method, described in section + :ref:`pyapi-mprequest-meth`. *(Read-Only)* + + +.. attribute:: request.allowed_xmethods + + Tuple. Allowed extension methods. *(Read-Only)* + + +.. attribute:: request.allowed_methods + + Tuple. List of allowed methods. Used in relation with + :const:`METHOD_NOT_ALLOWED`. This member can be modified via + :meth:`request.allow_methods` described in section + :ref:`pyapi-mprequest-meth`. *(Read-Only)* + + +.. attribute:: request.sent_bodyct + + Integer. Byte count in stream is for body. (?) *(Read-Only)* + + +.. attribute:: request.bytes_sent + + Long integer. Number of bytes sent. *(Read-Only)* + + +.. attribute:: request.mtime + + Long integer. Time the resource was last modified. *(Read-Only)* + + +.. attribute:: request.chunked + + Boolean value indicating when sending chunked transfer-coding. + *(Read-Only)* + + +.. attribute:: request.range + + String. The ``Range:`` header. *(Read-Only)* + + +.. attribute:: request.clength + + Long integer. The "real" content length. *(Read-Only)* + + +.. 
attribute:: request.remaining + + Long integer. Bytes left to read. (Only makes sense inside a read + operation.) *(Read-Only)* + + +.. attribute:: request.read_length + + Long integer. Number of bytes read. *(Read-Only)* + + +.. attribute:: request.read_body + + Integer. How the request body should be read. *(Read-Only)* + + +.. attribute:: request.read_chunked + + Boolean. Read chunked transfer coding. *(Read-Only)* + + +.. attribute:: request.expecting_100 + + Boolean. Is client waiting for a 100 (:const:`HTTP_CONTINUE`) + response. *(Read-Only)* + + +.. attribute:: request.headers_in + + A :class:`table` object containing headers sent by the client. + + +.. attribute:: request.headers_out + + A :class:`table` object representing the headers to be sent to the + client. + + +.. attribute:: request.err_headers_out + + These headers get send with the error response, instead of + headers_out. + + +.. attribute:: request.subprocess_env + + A :class:`table` object containing environment information + typically usable for CGI. You may have to call + :meth:`request.add_common_vars` and :meth:`request.add_cgi_vars` + first to fill in the information you need. + + +.. attribute:: request.notes + + A :class:`table` object that could be used to store miscellaneous + general purpose info that lives for as long as the request + lives. If you need to pass data between handlers, it's better to + simply add members to the request object than to use + :attr:`request.notes`. + + +.. attribute:: request.phase + + The phase currently being being processed, + e.g. ``'PythonHandler'``. *(Read-Only)* + + +.. attribute:: request.interpreter + + The name of the subinterpreter under which we're running. + *(Read-Only)* + + +.. attribute:: request.content_type + + String. The content type. Mod_python maintains an internal flag + (:attr:`request._content_type_set`) to keep track of whether + :attr:`request.content_type` was set manually from within + Python. 
The publisher handler uses this flag in the following way: + when :attr:`request.content_type` isn't explicitly set, it attempts + to guess the content type by examining the first few bytes of the + output. + + +.. attribute:: request.content_languages + + Tuple. List of strings representing the content languages. + + +.. attribute:: request.handler + + The symbolic name of the content handler (as in module, not + mod_python handler) that will service the request during the + response phase. When the SetHandler/AddHandler directives are used + to trigger mod_python, this will be set to ``'mod_python'`` by + mod_mime. A mod_python handler executing prior to the response + phase may also set this to ``'mod_python'`` along with calling + :meth:`request.add_handler` to register a mod_python handler for + the response phase:: + + def typehandler(req): + if os.path.splitext(req.filename)[1] == ".py": + req.handler = "mod_python" + req.add_handler("PythonHandler", "mod_python.publisher") + return apache.OK + return apache.DECLINED + + +.. attribute:: request.content_encoding + + String. Content encoding. *(Read-Only)* + + +.. attribute:: request.vlist_validator + + Integer. Variant list validator (if negotiated). *(Read-Only)* + + +.. attribute:: request.user + + If an authentication check is made, this will hold the user + name. Same as CGI :envvar:`REMOTE_USER`. + + On Python 3 the string is decoded using Latin1. (Different browsers + use different encodings for non-Latin1 characters for the basic + authentication string making a solution that fits all impossible, + you can always decode the header manually.) + + .. note:: + + :meth:`request.get_basic_auth_pw` must be called prior to using this value. + + +.. attribute:: request.ap_auth_type + + Authentication type. Same as CGI :envvar:`AUTH_TYPE`. + + +.. attribute:: request.no_cache + + Boolean. This response cannot be cached. + + +.. attribute:: request.no_local_copy + + Boolean. No local copy exists. + + +.. 
attribute:: request.unparsed_uri + + The URI without any parsing performed. *(Read-Only)* + + +.. attribute:: request.uri + + The path portion of the URI. + + +.. attribute:: request.filename + + String. File name being requested. + + +.. attribute:: request.canonical_filename + + String. The true filename (:attr:`request.filename` is + canonicalized if they don't match). + + +.. attribute:: request.path_info + + String. What follows after the file name, but is before query args, + if anything. Same as CGI :envvar:`PATH_INFO`. + + +.. attribute:: request.args + + String. Same as CGI :envvar:`QUERY_ARGS`. + + +.. attribute:: request.finfo + + A file information object with type ``mp_finfo``, analogous to the + result of the POSIX stat function, describing the file pointed to + by the URI. The object provides the attributes ``fname``, + ``filetype``, ``valid``, ``protection``, ``user``, ``group``, ``size``, + ``inode``, ``device``, ``nlink``, ``atime``, ``mtime``, ``ctime`` and + ``name``. + + The attribute may be assigned to using the result of + :func:`apache.stat`. For example:: + + if req.finfo.filetype == apache.APR_DIR: + req.filename = posixpath.join(req.filename, 'index.html') + req.finfo = apache.stat(req.filename, apache.APR_FINFO_MIN) + + For backward compatibility, the object can also be accessed as if + it were a tuple. The ``apache`` module defines a set of + :const:`FINFO_*` constants that should be used to access elements + of this tuple.:: + + user = req.finfo[apache.FINFO_USER] + + +.. attribute:: request.parsed_uri + + Tuple. The URI broken down into pieces. ``(scheme, hostinfo, user, password, hostname, port, path, query, fragment)``. + The :mod:`apache` module defines a set of :const:`URI_*` constants that + should be used to access elements of this tuple. Example:: + + fname = req.parsed_uri[apache.URI_PATH] + + *(Read-Only)* + + +.. attribute:: request.used_path_info + + Flag to accept or reject path_info on current request. + + +.. 
attribute:: request.eos_sent + + Boolean. EOS bucket sent. *(Read-Only)* + + +.. attribute:: request.useragent_addr + + *Apache 2.4 only* + + The (address, port) tuple for the user agent. + + This attribute should reflect the address of the user agent and + not necessarily the other end of the TCP connection, for which + there is :attr:`connection.client_addr`. + *(Read-Only)* + + +.. attribute:: request.useragent_ip + + *Apache 2.4 only* + + String with the IP of the user agent. Same as CGI :envvar:`REMOTE_ADDR`. + + This attribute should reflect the address of the user agent and + not necessarily the other end of the TCP connection, for which + there is :attr:`connection.client_ip`. + *(Read-Only)* + + +.. _pyapi-mpconn: + +Connection Object (mp_conn) +--------------------------- + +.. index:: + single: mp_conn + +The connection object is a Python mapping to the Apache +:c:type:`conn_rec` structure. + +.. _pyapi-mpconn-meth: + +Connection Methods +^^^^^^^^^^^^^^^^^^ + +.. method:: connection.log_error(message[, level]) + + An interface to the Apache ``ap_log_cerror`` function. *message* is + a string with the error message, *level* is one of the following + flag constants:: + + APLOG_EMERG + APLOG_ALERT + APLOG_CRIT + APLOG_ERR + APLOG_WARNING + APLOG_NOTICE + APLOG_INFO + APLOG_DEBUG + APLOG_NOERRNO + + If you need to write to log and do not have a reference to a connection or + request object, use the :func:`apache.log_error` function. + + +.. method:: connection.read([length]) + + Reads at most *length* bytes from the client. The read blocks + indefinitely until there is at least one byte to read. If length is + -1, keep reading until the socket is closed from the other end + (This is known as ``EXHAUSTIVE`` mode in the http server code). + + This method should only be used inside *Connection Handlers*. + + .. note:: + + The behavior of this method has changed since version 3.0.3. In + 3.0.3 and prior, this method would block until *length* bytes + were read. 
+ + +.. method:: connection.readline([length]) + + Reads a line from the connection or up to *length* bytes. + + This method should only be used inside *Connection Handlers*. + + +.. method:: connection.write(string) + + Writes *string* to the client. + + This method should only be used inside *Connection Handlers*. + + +.. _pyapi-mpconn-mem: + +Connection Members +^^^^^^^^^^^^^^^^^^ + +.. attribute:: connection.base_server + + A ``server`` object for the physical vhost that this connection came + in through. *(Read-Only)* + + +.. attribute:: connection.local_addr + + The (address, port) tuple for the server. *(Read-Only)* + + +.. attribute:: connection.remote_addr + + *Deprecated in Apache 2.4, use client_addr. (Aliased to client_addr for backward compatibility)* + + The (address, port) tuple for the client. *(Read-Only)* + + +.. attribute:: connection.client_addr + + *Apache 2.4 only* + + The (address, port) tuple for the client. + + This attribute reflects the other end of the TCP connection, which + may not always be the address of the user agent. A more accurate + source of the user agent address is :attr:`request.useragent_addr`. + *(Read-Only)* + + +.. attribute:: connection.remote_ip + + *Deprecated in Apache 2.4, use client_ip. (Aliased to client_ip for backward compatibility)* + + String with the IP of the client. In Apache 2.2 same as CGI :envvar:`REMOTE_ADDR`. + *(Read-Only)* + + +.. attribute:: connection.client_ip + + *Apache 2.4 only* + + String with the IP of the client. + + This attribute reflects the other end of the TCP connection, which + may not always be the address of the user agent. A more accurate + source of the user agent address is :attr:`request.useragent_ip`. + + *(Read-Only)* + + +.. attribute:: connection.remote_host + + String. The DNS name of the remote client. None if DNS has not been + checked, ``''`` (empty string) if no name found. Same as CGI + :envvar:`REMOTE_HOST`. *(Read-Only)* + + +.. 
attribute:: connection.remote_logname + + Remote name if using :rfc:`1413` (ident). Same as CGI + :envvar:`REMOTE_IDENT`. *(Read-Only)* + + +.. attribute:: connection.aborted + + Boolean. True if the connection is aborted. *(Read-Only)* + + +.. attribute:: connection.keepalive + + Integer. 1 means the connection will be kept for the next request, + 0 means "undecided", -1 means "fatal error". *(Read-Only)* + + +.. attribute:: connection.double_reverse + + Integer. 1 means double reverse DNS lookup has been performed, 0 + means not yet, -1 means yes and it failed. *(Read-Only)* + + +.. attribute:: connection.keepalives + + The number of times this connection has been used. (?) + *(Read-Only)* + + +.. attribute:: connection.local_ip + + String with the IP of the server. *(Read-Only)* + + +.. attribute:: connection.local_host + + DNS name of the server. *(Read-Only)* + + +.. attribute:: connection.id + + Long. A unique connection id. *(Read-Only)* + + +.. attribute:: connection.notes + + A :class:`table` object containing miscellaneous general purpose + info that lives for as long as the connection lives. + + +.. _pyapi-mpfilt: + +Filter Object (mp_filter) +------------------------- + +.. index:: + single: mp_filter + +A filter object is passed to mod_python input and output filters. It +is used to obtain filter information, as well as get and pass +information to adjacent filters in the filter stack. + +.. _pyapi-mpfilt-meth: + +Filter Methods +^^^^^^^^^^^^^^ + +.. method:: filter.pass_on() + + Passes all data through the filter without any processing. + + +.. method:: filter.read([length]) + + Reads at most *length* bytes from the next filter, returning a + string with the data read or None if End Of Stream (EOS) has been + reached. A filter *must* be closed once the EOS has been + encountered. + + If the *length* argument is negative or omitted, reads all data + currently available. + + +.. 
method:: filter.readline([length]) + + Reads a line from the next filter or up to *length* bytes. + + +.. method:: filter.write(string) + + Writes *string* to the next filter. + + +.. method:: filter.flush() + + Flushes the output by sending a FLUSH bucket. + + +.. method:: filter.close() + + Closes the filter and sends an EOS bucket. Any further IO + operations on this filter will throw an exception. + + +.. method:: filter.disable() + + Tells mod_python to ignore the provided handler and just pass the + data on. Used internally by mod_python to print traceback from + exceptions encountered in filter handlers to avoid an infinite + loop. + + +.. _pyapi-mpfilt-mem: + +Filter Members +^^^^^^^^^^^^^^ + +.. attribute:: filter.closed + + A boolean value indicating whether a filter is closed. + *(Read-Only)* + + +.. attribute:: filter.name + + String. The name under which this filter is registered. + *(Read-Only)* + + +.. attribute:: filter.req + + A reference to the request object. *(Read-Only)* + + +.. attribute:: filter.is_input + + Boolean. True if this is an input filter. *(Read-Only)* + + +.. attribute:: filter.handler + + String. The name of the Python handler for this filter as specified + in the configuration. *(Read-Only)* + + +.. _pyapi-mpserver: + +Server Object (mp_server) +------------------------- + +.. index:: + single: mp_server + + +The server object is a Python mapping to the Apache +``server_rec`` structure. The server structure describes the +server (possibly virtual server) serving the request. + +.. _pyapi-mpsrv-meth: + +Server Methods +^^^^^^^^^^^^^^ + +.. method:: server.get_config() + + Similar to :meth:`request.get_config()`, but returns a table object + holding only the mod_python configuration defined at global scope + within the Apache configuration. That is, outside of the context of + any VirtualHost, Location, Directory or Files directives. + + +.. 
method:: server.get_options() + + Similar to :meth:`request.get_options()`, but returns a table + object holding only the mod_python options defined at global scope + within the Apache configuration. That is, outside of the context of + any VirtualHost, Location, Directory or Files directives. + + +.. method:: server.log_error(message[level]) + + An interface to the Apache ``ap_log_error`` function. *message* is + a string with the error message, *level* is one of the following + flags constants:: + + APLOG_EMERG + APLOG_ALERT + APLOG_CRIT + APLOG_ERR + APLOG_WARNING + APLOG_NOTICE + APLOG_INFO + APLOG_DEBUG + APLOG_NOERRNO + + If you need to write to log and do not have a reference to a server or + request object, use the :func:`apache.log_error` function. + + +.. method:: server.register_cleanup(request, callable[, data]) + + Registers a cleanup. Very similar to :meth:`req.register_cleanup`, + except this cleanup will be executed at child termination + time. This function requires the request object be supplied to + infer the interpreter name. If you don't have any request object + at hand, then you must use the :func:`apache.register_cleanup` + variant. + + .. note:: + + *Warning:* do not pass directly or indirectly a request object in + the data parameter. Since the callable will be called at server + shutdown time, the request object won't exist anymore and any + manipulation of it in the callable will give undefined behaviour. + +.. _pyapi-mpsrv-mem: + +Server Members +^^^^^^^^^^^^^^ + +.. attribute:: server.defn_name + + String. The name of the configuration file where the server + definition was found. *(Read-Only)* + + +.. attribute:: server.defn_line_number + + Integer. Line number in the config file where the server definition + is found. *(Read-Only)* + + +.. attribute:: server.server_admin + + Value of the ``ServerAdmin`` directive. *(Read-Only)* + + +.. attribute:: server.server_hostname + + Value of the ``ServerName`` directive. 
Same as CGI + :envvar:`SERVER_NAME`. *(Read-Only)* + + +.. attribute:: server.names + + Tuple. List of normal server names specified in the ``ServerAlias`` + directive. This list does not include wildcarded names, which are + listed separately in ``wild_names``. *(Read-Only)* + + +.. attribute:: server.wild_names + + Tuple. List of wildcarded server names specified in the ``ServerAlias`` + directive. *(Read-Only)* + + +.. attribute:: server.port + + Integer. TCP/IP port number. Same as CGI :envvar:`SERVER_PORT`. + *This member appears to be 0 on Apache 2.0, look at + req.connection.local_addr instead* *(Read-Only)* + + +.. attribute:: server.error_fname + + The name of the error log file for this server, if any. + *(Read-Only)* + + +.. attribute:: server.loglevel + + Integer. Logging level. *(Read-Only)* + + +.. attribute:: server.is_virtual + + Boolean. True if this is a virtual server. *(Read-Only)* + + +.. attribute:: server.timeout + + Integer. Value of the ``Timeout`` directive. *(Read-Only)* + + +.. attribute:: server.keep_alive_timeout + + Integer. Keepalive timeout. *(Read-Only)* + + +.. attribute:: server.keep_alive_max + + Maximum number of requests per keepalive. *(Read-Only)* + + +.. attribute:: server.keep_alive + + Use persistent connections? *(Read-Only)* + + +.. attribute:: server.path + + String. Path for ``ServerPath`` *(Read-Only)* + + +.. attribute:: server.pathlen + + Integer. Path length. *(Read-Only)* + + +.. attribute:: server.limit_req_line + + Integer. Limit on size of the HTTP request line. *(Read-Only)* + + +.. attribute:: server.limit_req_fieldsize + + Integer. Limit on size of any request header field. *(Read-Only)* + + +.. attribute:: server.limit_req_fields + + Integer. Limit on number of request header fields. *(Read-Only)* + + +.. _pyapi-util: + +:mod:`util` -- Miscellaneous Utilities +====================================== + +.. module:: util + :synopsis: Miscellaneous Utilities. +.. 
moduleauthor:: Gregory Trubetskoy grisha@modpython.org + +The :mod:`util` module provides a number of utilities handy to a web +application developer similar to those in the standard library +:mod:`cgi` module. The implementations in the :mod:`util` module are +much more efficient because they call directly into Apache API's as +opposed to using CGI which relies on the environment to pass +information. + +The recommended way of using this module is:: + + from mod_python import util + + +.. seealso:: + + :rfc:`3875` + for detailed information on the CGI specification + +.. _pyapi-util-fstor: + + +FieldStorage class +------------------ + +Access to form data is provided via the :class:`FieldStorage` +class. This class is similar to the standard library module +``cgi.FieldStorage`` + +.. class:: FieldStorage(req[, keep_blank_values[, strict_parsing[, file_callback[, field_callback]]]]) + + This class provides uniform access to HTML form data submitted by + the client. *req* is an instance of the mod_python + :class:`request` object. + + The optional argument *keep_blank_values* is a flag indicating + whether blank values in URL encoded form data should be treated as + blank strings. The default is false, which means that blank values + are ignored as if they were not included. + + The optional argument *strict_parsing* is not yet implemented. + + The optional argument *file_callback* allows the application to + override both file creation/deletion semantics and location. See + :ref:`pyapi-util-fstor-examples` for + additional information. *New in version 3.2* + + The optional argument *field_callback* allows the application to + override both the creation/deletion semantics and behavior. *New + in version 3.2* + + During initialization, :class:`FieldStorage` class reads all of the + data provided by the client. 
Since all data provided by the client + is consumed at this point, there should be no more than one + :class:`FieldStorage` class instantiated per single request, nor + should you make any attempts to read client data before or after + instantiating a :class:`FieldStorage`. A suggested strategy for + dealing with this is that any handler should first check for the + existence of a ``form`` attribute within the request object. If + this exists, it should be taken to be an existing instance of the + :class:`FieldStorage` class and that should be used. If the + attribute does not exist and needs to be created, it should be + cached as the ``form`` attribute of the request object so later + handler code can use it. + + When the :class:`FieldStorage` class instance is created, the data + read from the client is then parsed into separate fields and + packaged in :class:`Field` objects, one per field. For HTML form + inputs of type ``file``, a temporary file is created that can later + be accessed via the :attr:`Field.file` attribute of a + :class:`Field` object. + + The :class:`FieldStorage` class has a mapping object interface, + i.e. it can be treated like a dictionary in most instances, but is + not strictly compatible as is it missing some methods provided by + dictionaries and some methods don't behave entirely like their + counterparts, especially when there is more than one value + associated with a form field. When used as a mapping, the keys are + form input names, and the returned dictionary value can be: + + * An instance of :class:`StringField`, containing the form input + value. This is only when there is a single value corresponding to + the input name. :class:`StringField` is a subclass of + :class:`str` which provides the additional + :attr:`StringField.value` attribute for compatibility with + standard library :mod:`cgi` module. + + * An instance of a :class:`Field` class, if the input is a file + upload. 
+ + * A list of :class:`StringField` and/or :class:`Field` + objects. This is when multiple values exist, such as for a + ``
+ Email:
+ Comment:
+ + + + + +The ``action`` element of the ``
`` tag points to +``form.py/email``. We are going to create a file called +:file:`form.py`, like this:: + + import smtplib + + WEBMASTER = "webmaster" # webmaster e-mail + SMTP_SERVER = "localhost" # your SMTP server + + def email(req, name, email, comment): + + # make sure the user provided all the parameters + if not (name and email and comment): + return "A required parameter is missing, \ + please go back and correct the error" + + # create the message text + msg = """\ + From: %s + Subject: feedback + To: %s + + I have the following comment: + + %s + + Thank You, + + %s + + """ % (email, WEBMASTER, comment, name) + + # send it out + conn = smtplib.SMTP(SMTP_SERVER) + conn.sendmail(email, [WEBMASTER], msg) + conn.quit() + + # provide feedback to the user + s = """\ + + + Dear %s,
+ Thank You for your kind comments, we + will get back to you shortly. + + """ % name + + return s + +When the user clicks the Submit button, the publisher handler will +load the :func:`email` function in the :mod:`form` module, +passing it the form fields as keyword arguments. It will also pass the +request object as ``req``. + +You do not have to have ``req`` as one of the arguments if you do not +need it. The publisher handler is smart enough to pass your function +only those arguments that it will accept. + +The data is sent back to the browser via the return value of the +function. + +Even though the Publisher handler simplifies mod_python programming a +great deal, all the power of mod_python is still available to this +program, since it has access to the request object. You can do all the +same things you can do with a "native" mod_python handler, e.g. set +custom headers via ``req.headers_out``, return errors by raising +:exc:`apache.SERVER_ERROR` exceptions, write or read directly to +and from the client via :meth:`req.write()` and :meth:`req.read()`, +etc. + +Read Section :ref:`hand-pub` for more information on the publisher +handler. + +.. _tut-overview: + +Quick Overview of how Apache Handles Requests +============================================= + +Apache processes requests in :dfn:`phases`. For example, the first +phase may be to authenticate the user, the next phase to verify +whether that user is allowed to see a particular file, then (next +phase) read the file and send it to the client. A typical static file +request involves three phases: (1) translate the requested URI to a +file location (2) read the file and send it to the client, then (3) +log the request. Exactly which phases are processed and how varies +greatly and depends on the configuration. + +A :dfn:`handler` is a function that processes one phase. There may be +more than one handler available to process a particular phase, in +which case they are called by Apache in sequence. 
For each of the +phases, there is a default Apache handler (most of which by default +perform only very basic functions or do nothing), and then there are +additional handlers provided by Apache modules, such as mod_python. + +Mod_python provides every possible handler to Apache. Mod_python +handlers by default do not perform any function, unless specifically +told so by a configuration directive. These directives begin with +``'Python'`` and end with ``'Handler'`` +(e.g. ``PythonAuthenHandler``) and associate a phase with a Python +function. So the main function of mod_python is to act as a dispatcher +between Apache handlers and Python functions written by a developer +like you. + +The most commonly used handler is ``PythonHandler``. It handles the +phase of the request during which the actual content is +provided. Because it has no name, it is sometimes referred to as a +:dfn:`generic` handler. The default Apache action for this handler is +to read the file and send it to the client. Most applications you will +write will provide this one handler. To see all the possible +handlers, refer to Section :ref:`directives`. + +.. _tut-what-it-do: + +So what Exactly does Mod-python do? +=================================== + +Let's pretend we have the following configuration:: + + <Directory /mywebdir> + AddHandler mod_python .py + PythonHandler myscript + PythonDebug On + </Directory> + +Note: ``/mywebdir`` is an absolute physical path in this case. + +And let's say that we have a python program (Windows users: substitute +forward slashes for backslashes) :file:`/mywebdir/myscript.py` that looks like +this:: + + from mod_python import apache + + def handler(req): + + req.content_type = "text/plain" + req.write("Hello World!") + + return apache.OK + +Here is what's going to happen: The ``AddHandler`` directive tells +Apache that any request for any file ending with :file:`.py` in the +:file:`/mywebdir` directory or a subdirectory thereof needs to be +processed by mod_python. 
The ``'PythonHandler myscript'`` directive +tells mod_python to process the generic handler using the +`myscript` script. The ``'PythonDebug On'`` directive instructs +mod_python in case of an Python error to send error output to the +client (in addition to the logs), very useful during development. + +When a request comes in, Apache starts stepping through its request +processing phases calling handlers in mod_python. The mod_python +handlers check whether a directive for that handler was specified in +the configuration. (Remember, it acts as a dispatcher.) In our +example, no action will be taken by mod_python for all handlers except +for the generic handler. When we get to the generic handler, +mod_python will notice ``'PythonHandler myscript'`` directive and do +the following: + +* If not already done, prepend the directory in which the + ``PythonHandler`` directive was found to ``sys.path``. + +* Attempt to import a module by name ``myscript``. (Note that if + ``myscript`` was in a subdirectory of the directory where + ``PythonHandler`` was specified, then the import would not work + because said subdirectory would not be in the ``sys.path``. One + way around this is to use package notation, e.g. + ``'PythonHandler subdir.myscript'``.) + +* Look for a function called ``handler`` in module ``myscript``. + +* Call the function, passing it a request object. (More on what a + request object is later). + +* At this point we're inside the script, let's examine it line-by-line: + + * :: + + from mod_python import apache + + This imports the apache module which provides the interface to + Apache. With a few rare exceptions, every mod_python program will have + this line. + + .. index:: + single: handler + + * :: + + def handler(req): + + This is our :dfn:`handler` function declaration. It + is called ``'handler'`` because mod_python takes the name of the + directive, converts it to lower case and removes the word + ``'python'``. 
Thus ``'PythonHandler'`` becomes + ``'handler'``. You could name it something else, and specify it + explicitly in the directive using ``'::'``. For example, if the + handler function was called ``'spam'``, then the directive would + be ``'PythonHandler myscript::spam'``. + + Note that a handler must take one argument - the :ref:`pyapi-mprequest`. + The request object is an object that provides all of the + information about this particular request - such as the IP of + client, the headers, the URI, etc. The communication back to the + client is also done via the request object, i.e. there is no + "response" object. + + * :: + + req.content_type = "text/plain" + + This sets the content type to ``'text/plain'``. The default is + usually ``'text/html'``, but because our handler does not produce + any html, ``'text/plain'`` is more appropriate. You should always + make sure this is set *before* any call to ``'req.write'``. When + you first call ``'req.write'``, the response HTTP header is sent + to the client and all subsequent changes to the content type (or + other HTTP headers) have no effect. + + * :: + + req.write("Hello World!") + + This writes the ``'Hello World!'`` string to the client. + + * :: + + return apache.OK + + This tells Apache that everything went OK and that the request has + been processed. If things did not go OK, this line could return + :const:`apache.HTTP_INTERNAL_SERVER_ERROR` or + :const:`apache.HTTP_FORBIDDEN`. When things do not go OK, Apache + logs the error and generates an error message for the client. + +.. note:: + + It is important to understand that in order for the handler code to + be executed, the URL need not refer specifically to + :file:`myscript.py`. The only requirement is that it refers to a + :file:`.py` file. This is because the ``AddHandler mod_python .py`` + directive assigns mod_python to be a handler for a file *type* + (based on extension ``.py``), not a specific file. 
Therefore the +name in the URL does not matter, in fact the file referred to in the +URL doesn't even have to exist. Given the above configuration, +``'http://myserver/mywebdir/myscript.py'`` and +``'http://myserver/mywebdir/montypython.py'`` would yield the exact +same result. + + +.. _tut-more-complicated: + +Now something More Complicated - Authentication +=============================================== + +Now that you know how to write a basic handler, let's try +something more complicated. + +Let's say we want to password-protect this directory. We want the +login to be ``'spam'``, and the password to be ``'eggs'``. + +First, we need to tell Apache to call our *authentication* +handler when authentication is needed. We do this by adding the +``PythonAuthenHandler``. So now our config looks like this:: + + <Directory /mywebdir> + AddHandler mod_python .py + PythonHandler myscript + PythonAuthenHandler myscript + PythonDebug On + </Directory> + +Notice that the same script is specified for two different +handlers. This is fine, because if you remember, mod_python will look +for different functions within that script for the different handlers. + +Next, we need to tell Apache that we are using Basic HTTP +authentication, and only valid users are allowed (this is fairly basic +Apache stuff, so we're not going to go into details here). Our config +looks like this now:: + + <Directory /mywebdir> + AddHandler mod_python .py + PythonHandler myscript + PythonAuthenHandler myscript + PythonDebug On + AuthType Basic + AuthName "Restricted Area" + require valid-user + </Directory> + +Note that depending on which version of Apache is being used, you may need +to set either the ``AuthAuthoritative`` or ``AuthBasicAuthoritative`` +directive to ``Off`` to tell Apache that you want to allow the task of +performing basic authentication to fall through to your handler. + +Now we need to write an authentication handler function in +:file:`myscript.py`. 
A basic authentication handler would look like +this:: + + from mod_python import apache + + def authenhandler(req): + + pw = req.get_basic_auth_pw() + user = req.user + + if user == "spam" and pw == "eggs": + return apache.OK + else: + return apache.HTTP_UNAUTHORIZED + +Let's look at this line by line: + +* :: + + def authenhandler(req): + + This is the handler function declaration. This one is called + ``authenhandler`` because, as we already described above, + mod_python takes the name of the directive + (``PythonAuthenHandler``), drops the word ``'Python'`` and converts + it lower case. + +* :: + + pw = req.get_basic_auth_pw() + + This is how we obtain the password. The basic HTTP authentication + transmits the password in base64 encoded form to make it a little + bit less obvious. This function decodes the password and returns it + as a string. Note that we have to call this function before obtaining + the user name. + +* :: + + user = req.user + + This is how you obtain the username that the user entered. + +* :: + + if user == "spam" and pw == "eggs": + return apache.OK + + + We compare the values provided by the user, and if they are what we + were expecting, we tell Apache to go ahead and proceed by returning + :const:`apache.OK`. Apache will then consider this phase of the + request complete, and proceed to the next phase. (Which in this case + would be :func:`handler()` if it's a ``'.py'`` file). + +* :: + + else: + return apache.HTTP_UNAUTHORIZED + + Else, we tell Apache to return :const:`HTTP_UNAUTHORIZED` to the + client, which usually causes the browser to pop a dialog box asking + for username and password. + +.. _tut-404-handler: + +Your Own 404 Handler +==================== + +In some cases, you may wish to return a 404 (:const:`HTTP_NOT_FOUND`) or +other non-200 result from your handler. There is a trick here. if you +return :const:`HTTP_NOT_FOUND` from your handler, Apache will handle +rendering an error page. 
This can be problematic if you wish your handler +to render it's own error page. + +In this case, you need to set ``req.status = apache.HTTP_NOT_FOUND``, +render your page, and then ``return(apache.OK)``:: + + from mod_python import apache + + def handler(req): + if req.filename[-17:] == 'apache-error.html': + # make Apache report an error and render the error page + return(apache.HTTP_NOT_FOUND) + if req.filename[-18:] == 'handler-error.html': + # use our own error page + req.status = apache.HTTP_NOT_FOUND + pagebuffer = 'Page not here. Page left, not know where gone.' + else: + # use the contents of a file + pagebuffer = open(req.filename, 'r').read() + + # fall through from the latter two above + req.write(pagebuffer) + return(apache.OK) + +Note that if wishing to returning an error page from a handler phase other +than the response handler, the value ``apache.DONE`` must be returned +instead of ``apache.OK``. If this is not done, subsequent handler phases +will still be run. The value of ``apache.DONE`` indicates that processing +of the request should be stopped immediately. If using stacked response +handlers, then ``apache.DONE`` should also be returned in that situation +to prevent subsequent handlers registered for that phase being run if +appropriate. diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile.in b/Makefile.in new file mode 100644 index 0000000..df6afa4 --- /dev/null +++ b/Makefile.in @@ -0,0 +1,86 @@ + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ # See the License for the specific language governing permissions and + # limitations under the License. + # + # Originally developed by Gregory Trubetskoy. + # + +@SET_MAKE@ +LIBEXECDIR=@LIBEXECDIR@ +AP_SRC=@AP_SRC@ +AP_SRC_OWN=@AP_SRC_OWN@ +AP_SRC_GRP=@AP_SRC_GRP@ +INSTALL=@INSTALL@ +PYTHON_BIN=@PYTHON_BIN@ + +.PHONY: all test clean + +all: @ALL@ + +dso: @DSO@ + +do_dso: + @cd src && $(MAKE) + @cd dist && $(MAKE) build + @echo + @echo 'Now run sudo make install' + @echo ' (or, if you only want to perform a partial install,' + @echo ' you can use make install_dso and make install_py_lib)' + @echo + +no_dso: + @echo + @echo "DSO compilation not available. (Probably because apxs could not be found)." + @echo + +static: @STATIC@ + +no_static: + @echo + @echo "Static compilation not available. (Probably because --with-apache was not specified)." + @echo + +install: + $(MAKE) install_dso + $(MAKE) install_py_lib + cd scripts && $(MAKE) install + +install_dso: dso + @echo + @echo "Performing DSO installation." 
+ @echo + $(INSTALL) -d $(DESTDIR)$(LIBEXECDIR) + $(INSTALL) src/mod_python.so $(DESTDIR)$(LIBEXECDIR) + +install_py_lib: + cd dist && $(MAKE) install_py_lib + +clean: + cd src && $(MAKE) clean + cd dist && $(MAKE) clean + cd scripts && $(MAKE) clean + cd test && $(MAKE) clean + rm -f core + +distclean: clean + cd src && $(MAKE) distclean + cd Doc && $(MAKE) distclean + cd dist && $(MAKE) distclean + cd scripts && $(MAKE) distclean + cd test && $(MAKE) distclean + rm -rf Makefile config.h config.status config.cache config.log \ + test/testconf.py + +test: @ALL@ + cd test && $(MAKE) test diff --git a/NEWS b/NEWS new file mode 100644 index 0000000..ce845cc --- /dev/null +++ b/NEWS @@ -0,0 +1,318 @@ +Oct 22 2013 - 3.4.1 released + +Jan 29 2007 - 3.3.1 is being tagged + +Dec 9 2006 - 3.3.0b is being tagged + +Aug 7 2006 - 3.2.10 released + +July 19 2006 - 3.2.10 is being tagged from branches/3.2.x + +July 19 2006 - The public release of 3.2.9 is being abandoned due to some + recently reported memory leaks. Although 3.2.9 is ready for + release, it is felt that it is prudent to address these now + and proceed immediately to 3.2.10. See MODPYTHON-172 for + on the specific details on the issues. + +June 29 2006 - 3.2.9 is being tagged from branches/3.2.x + +Feb 19 2006 - 3.2.8 is being tagged from branches/3.2.x + 3.2.8 is a security release to fix the possible + directory traversal attack in FileSession. 
+ +Feb 9 2006 - Created 3.2.x stable bugfix branch in svn as + branches/3.2.x + +Feb 3 2006 - 3.2.7 is being tagged + +Jan 16 2006 - 3.2.6 final is being tagged (no changes from 3.2.6b) + +Jan 14 2006 - 3.2.6b is being tagged + +Nov 15 2005 - 3.2.5b is being tagged + +Oct 28 2005 - 3.2.4b is being tagged + +Oct 22 2005 - 3.2.3b is being tagged + +Sep 13 2005 - 3.2.2b is being tagged + +Sep 6 2005 - 3.2.1b is being tagged + +Aug 16 2005 - 3.2.0b is being tagged + +Feb 17 2004 - 3.1.3 is being tagged + +Oct 14 2003 - 3.1.1b is tagged + +Aug 29 2003 - 3.1.0a (Alpha) is out + +Mar 17 2003 - 3.0.3 is released + + This file will no longer have details, since those + are in CVS anyway. + +Feb 12 2003 - Added a test for req.headers_out + + Fixed a bug where None was added to sys.path + + Tests for connection and server added + +Jan 18 2003 - Between now and the last entry the following things took + place: + + Problem with loading DSO on Solaris relating to _muldi3 + fixed. + + A problem fixed related to | .ext syntax. + + An environ issue addressed. + + Filtering by type should now work correctly. + + Publisher's code for dealing with file uploads simplified. + + A windows installer added. + + Install adopted to use distutils. + + A cgihandler problem resulting in double output fixed. + + PyOS_AfterFork() added where needed. + + The binary produced by MSVC no longer needs any MS dll's. + + Filter flush() now cleans memory so large files can be + processed. + +Nov 26 2002 - 3.0.1 is about to be released. This was file has not been + updated during the 3.0 development because of too many + things changing. + +Nov 28 2001 - Now compiles against Apache 2.0.28 + +Nov 6 2001 - More internal changes. Bugs from previous change, cleaner + reference keeping. + +Nov 1 2001 - Many internal changes. 
We no longer use req->notes for things + that shouldn't be there, handler directives can be restricted + to specific file extensions thereby allowing multiple handlers + per directory, etc. The config is now stored in a new hlist + object, which can be accessed from python via an hlistobject. + +Aug 16 2001 - Mod_python is compiling and running with Apache 2.0 and the + filter functionality seems to work. There is still a lot of + work to be done though. + +Aug 4 2001 - 2.7.6 released. Sometime prior a publisher upload bug was + fixed. + +Jun 11 2001 - 2.7.5 released for those who want Python 2.1 + +Jun 11 2001 - Changed configure.in to accommodate Python 2.1. It looks like + everything else works without any changes. + +Jun 4 2001 - 2.7.4 released. + +May 28 2001 - Renamed make_table, log_error, table_add and copy_table with + mp_ so as to not get confused with the ones in ap_compat.h + +May 24 2001 - PythonNoReload obsoleted in favor of PythonAutoReload. + +May 23 2001 - The "fix" re import on Apr 10 turned out to be a bug and + I put it back. But in the process found that the FLAG directives + were not read correctly - e.g. mere presence sometimes indicated + On. Also, ReportError was returning DONE in finally clause which + was wrong. + +May 22 2001 - The bug that was supposedly fixed below was a bit more + complicated than it seemed at first. The fix below caused + a circular reference, and some further fixing had to be done, + and now it's really fixed, thanks to Chris's diligence. + +May 17 2001 - Fixed a threading bug reported by Chris Trengove where + the callback object could have the reference to self.req + overwritten by other threads. The reference to the Request + object is now stored inside the requestobject C structure. + +May 12 2001 - 2.7.3 released + +Apr 10 2001 - Fixed a PythonImport bug reported by Chris Hagner. Removed + the atol hack as per Miguel Marques patch. 
Fixed apache.py + to use a messier looking but for some obscure reason that + now escapes me necessary way of importing modules via imp. + +Apr 7 2001 - "Disappearing args" on Solaris fixed. It was a problem + in parse_qsl. + +Apr 6 2001 - Missing _eprintf() on Solaris/gcc workaround added. I + wonder if this applies to other OS's? + +Apr 5 2001 - A couple doc fixes. + +Feb 10 2001 - 2.7.2 released + +Feb 10 2001 - Fixed the disappearing / in PATH_INFO. + +Jan 23 2001 - Fixed the bug where req.server.register_cleanup was + expecting a builtin _req object rather than the new real + Python Request. + +Jan 21 2001 - The Publisher handler __auth__ can now be a dictionary or + a const. There is also new __access__, which can be a list. + +Jan 19 2001 - Added req._content_type_set flag which is set to 1 when + a value is assigned to content_type from Python. This way + the publisher handler can know if it's been purposely set + and will not attempt to guess it. + +Jan 18 2001 - Documented req.form. Fixed a security problem with the + Publisher handler - it now does not allow modules to be + published, so a user can't access test.os.renames, etc. + +Dec 18 2000 - 2.7 had a typo in it + win32 wants types initialized + separately like I thought. Time for 2.7.1. + +Dec 16 2000 - Releasing 2.7.... + +Dec 16 2000 - Found another bug related to ALLOW_THREADS macros which are + noops without threads, but that is actually wrong, because + what they do is needed even without threads. Also, some days + ago 2.6.4 was released. + +Dec 13 2000 - The initial documentation for the util module and for the + publisher handler is done. Perhaps the tutorial should have + a bit on the publisher handler. + +Dec 12 2000 - publisher handler appears to be working pretty well. Now need + to document it. + +Dec 11 2000 - It appears I found a big booboo with misplaced #ifdef + WITH_THREADS... The "Dispatch returned nothing" should be + gone now. 
This means 2.6.3 has major problems with multiple + interpreters. + +Dec 8 2000 - connection.user now writable. More Doc improvements. + +Dec 6 2000 - The COPYRIGHT no longer has the advertising clause. + +Dec 4 2000 - Initial (not proof-read) LaTeX source for documentation is + checked in. + +Nov 26 2000 - Diligently migrating all the documentation to LaTeX using the + Python standards. + +Nov 17 2000 - I forgot to uncomment type initialization. type(req.headers_in) + would segfault. Fixed. Continuing work on publisher.py module. + +Nov 08 2000 - read() and readline() now behave very much like the standard + Python file object counterparts. Added James Gessling's VMS + instructions. + +Nov 07 2000 - Initial version of req.readline(), also some fixes to + req.read() (both now raise appropriate errors). Both still need + some work. + +Nov 04 2000 - Implemented _apache.parse_qs. Also, CGIStin had a read() bug. + PythonHandlerModule is documented. + +Oct 30 2000 - Implemented PythonHandlerModule. Still need to document it. + +Oct 29 2000 - 2.6.3 release. Mostly static install bug fixes. + +Oct 22 2000 - 2.6.2 release + +Oct 22 2000 - "Fatal Python error: PyThreadState_Get: no current thread" upon + exit is now fixed. Also, --with-python was improved to point + to the right Makefile (Modules/Makefile) when scanning for LIBS. + +Oct 21 2000 - 2.6.1 release + +Oct 20 2000 - Fixed some minor installation bugs. + +Oct 19 2000 - 2.6 out + +Oct 16 2000 - Began a major file reorganization. All objects are now in + separate files, and all external functions have an Mp prefix + and named consistently with Python C API conventions. + +Oct 15 2000 - We now use mkdep. + +Oct 12 2000 - Autoconf now works. Documentation has been adjusted. Also + added Windows installation instructions to the docs. + +Oct 2 2000 - PythonInterpPerServer is now default behavior, and this + directive is therefore obsolete. 
The old default behavior can + be achieved via the new PythonInterpPerDirective directive. + +Sep ? 2000 - Request is now a real python object. This means that it can be + used to retain state between requests. + +Sep 9 2000 - Added server.register_cleanup(). This happened to be a little + trickier than I thought since it turned out that server_rec does + not have a pool member. Py_Finalize() has been moved to a + cleanup from the ChildExit handler because ChildExit runs *before* + any cleanups. (Now I know why mod_perl doesn't use child_exit.) + +Sep 8 2000 - Sean True's fix to call note_basic_auth_failure added. + +Sep 6 2000 - Stéphane Bidoul's Win32 fixes applied. + +Sep 5 2000 - 2.5 released. + +Sep 4 2000 - Added the PythonCleanupHandler. + +Sep 4 2000 - Added req.register_cleanup(). Still need server.register_cleanup(), + as well as a PythonCleanupHandler. + +Sep 2 2000 - Added PythonInterpPerServer directive. Fixed a bug relating + to ap_add_version_component. + +Aug 28 2000 - Added Richard Barrett's patch that gives python socket module + behaviour from req.connection.local_addr and remote_addr. + +Aug 27 2000 - Added PythonInitHandler. Also, it looks like dynamic handler + addition now works correctly, after trying 15 different ways of + implementing it, resorting to req->notes seems to be the only + option. + +Aug 18 2000 - Added req.get_remote_host() + +Aug 16 2000 - Added Dr. Timochouk's PythonOptimize directive patch. + +Aug 15 2000 - Extensive improvements to req.read() prompted by Dr. Timochouk's + patches. + +Aug 10 2000 - Documentation change - get_basic_auth_pw must be called before + using connection.user. + +Aug 06 2000 - Table object now has a new method "add" which allows creation + of multiple keys. This is useful with things like "Set-Cookie" + headers. 
+ +Jul 22 2000 - Added req.add_handler (dynamic handler registration) + +Jul 18 2000 - Added PythonEnablePdb + ChildExitHandler now properly calls Py_Finalize() + python_handler is now cumulative rather than overriding + +Jul 04 2000 - 2.4.1 Released. Mostly bug fixes. Should be pretty stable. + +Jun 20 2000 - 2.4 Released. + +Jun 17 2000 - Started the tutorial.html. + +Jun 11 2000 - Stephane Bidoul's thread-safe win32 changes put in. As part + of this, all chdir()'s are gone and now instead of '.', the + file path is prepended to pythonpath. + +Jun 8 2000 - 2.3 Released. + +Jun 7 2000 - PythonImport now works. + +Jun 5 2000 - PythonDebug and other on/off type handlers are now of type FLAG + so they require an argument of On or Off. + +Apr 2000 - rename to mod_python and go apache-specific. +Nov 1998 - support for multiple interpreters introduced. +May 1998 - initial release (httpdapy). diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000..0c672ea --- /dev/null +++ b/NOTICE @@ -0,0 +1,9 @@ +This product includes software developed by +The Apache Software Foundation (http://www.apache.org/). + +Mod_python was originally developed by Gregory Trubetskoy. + +This software is based on the original concept as published in the +book "Internet Programming with Python" by Aaron Watters, Guido van +Rossum and James C. Ahlstrom, 1996 M&T Books, ISBN: 1-55851-484-8. +The book and original software is Copyright 1996 by M&T Books. diff --git a/README.md b/README.md new file mode 100644 index 0000000..3a9a995 --- /dev/null +++ b/README.md @@ -0,0 +1,37 @@ +mod_python +========== + +Documentation for mod_python is on http://www.modpython.org/ + +Quick Start +----------- + +If you can't read instructions: + +```shell +$ ./configure +$ make +$ sudo make install +$ make test +``` + +If the above worked - read the tutorial in the documentation. + +OS Hints +-------- + +### Windows: + +HELP NEEDED! I do not have access to a Windows development +environment. 
If you get a Windows compile working, please create a +pull request or drop a note on the mod_python mailing list: +mod_python@modpython.org (Note: subscription required). + +### Mac OS X/Darwin: + +At least on OS X 10.8.5, the following was required in order for compile to work: + +```shell +sudo ln -s /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain \ + /Applications/Xcode.app/Contents/Developer/Toolchains/OSX10.8.xctoolchain +``` diff --git a/configure b/configure new file mode 100644 index 0000000..b0beb76 --- /dev/null +++ b/configure @@ -0,0 +1,4730 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.63. +# +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, +# 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## --------------------- ## +## M4sh Initialization. ## +## --------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; +esac + +fi + + + + +# PATH needs CR +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. 
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + +# Support unset when possible. +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + as_unset=unset +else + as_unset=false +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +case $0 in + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break +done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + { (exit 1); exit 1; } +fi + +# Work around bugs in pre-3.0 UWIN ksh. +for as_var in ENV MAIL MAILPATH +do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# Required to use basename. +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + + +# Name of the executable. +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# CDPATH. +$as_unset CDPATH + + +if test "x$CONFIG_SHELL" = x; then + if (eval ":") 2>/dev/null; then + as_have_required=yes +else + as_have_required=no +fi + + if test $as_have_required = yes && (eval ": +(as_func_return () { + (exit \$1) +} +as_func_success () { + as_func_return 0 +} +as_func_failure () { + as_func_return 1 +} +as_func_ret_success () { + return 0 +} +as_func_ret_failure () { + return 1 +} + +exitcode=0 +if as_func_success; then + : +else + exitcode=1 + echo as_func_success failed. +fi + +if as_func_failure; then + exitcode=1 + echo as_func_failure succeeded. +fi + +if as_func_ret_success; then + : +else + exitcode=1 + echo as_func_ret_success failed. 
+fi + +if as_func_ret_failure; then + exitcode=1 + echo as_func_ret_failure succeeded. +fi + +if ( set x; as_func_ret_success y && test x = \"\$1\" ); then + : +else + exitcode=1 + echo positional parameters were not saved. +fi + +test \$exitcode = 0) || { (exit 1); exit 1; } + +( + as_lineno_1=\$LINENO + as_lineno_2=\$LINENO + test \"x\$as_lineno_1\" != \"x\$as_lineno_2\" && + test \"x\`expr \$as_lineno_1 + 1\`\" = \"x\$as_lineno_2\") || { (exit 1); exit 1; } +") 2> /dev/null; then + : +else + as_candidate_shells= + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + case $as_dir in + /*) + for as_base in sh bash ksh sh5; do + as_candidate_shells="$as_candidate_shells $as_dir/$as_base" + done;; + esac +done +IFS=$as_save_IFS + + + for as_shell in $as_candidate_shells $SHELL; do + # Try only shells that exist, to save several forks. + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + { ("$as_shell") 2> /dev/null <<\_ASEOF +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; +esac + +fi + + +: +_ASEOF +}; then + CONFIG_SHELL=$as_shell + as_have_required=yes + if { "$as_shell" 2> /dev/null <<\_ASEOF +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. 
+ alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; +esac + +fi + + +: +(as_func_return () { + (exit $1) +} +as_func_success () { + as_func_return 0 +} +as_func_failure () { + as_func_return 1 +} +as_func_ret_success () { + return 0 +} +as_func_ret_failure () { + return 1 +} + +exitcode=0 +if as_func_success; then + : +else + exitcode=1 + echo as_func_success failed. +fi + +if as_func_failure; then + exitcode=1 + echo as_func_failure succeeded. +fi + +if as_func_ret_success; then + : +else + exitcode=1 + echo as_func_ret_success failed. +fi + +if as_func_ret_failure; then + exitcode=1 + echo as_func_ret_failure succeeded. +fi + +if ( set x; as_func_ret_success y && test x = "$1" ); then + : +else + exitcode=1 + echo positional parameters were not saved. +fi + +test $exitcode = 0) || { (exit 1); exit 1; } + +( + as_lineno_1=$LINENO + as_lineno_2=$LINENO + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2") || { (exit 1); exit 1; } + +_ASEOF +}; then + break +fi + +fi + + done + + if test "x$CONFIG_SHELL" != x; then + for as_var in BASH_ENV ENV + do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var + done + export CONFIG_SHELL + exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"} +fi + + + if test $as_have_required = no; then + echo This script requires a shell more modern than all the + echo shells that I found on your system. Please install a + echo modern shell, or manually run the script under such a + echo shell if you do have one. + { (exit 1); exit 1; } +fi + + +fi + +fi + + + +(eval "as_func_return () { + (exit \$1) +} +as_func_success () { + as_func_return 0 +} +as_func_failure () { + as_func_return 1 +} +as_func_ret_success () { + return 0 +} +as_func_ret_failure () { + return 1 +} + +exitcode=0 +if as_func_success; then + : +else + exitcode=1 + echo as_func_success failed. 
+fi + +if as_func_failure; then + exitcode=1 + echo as_func_failure succeeded. +fi + +if as_func_ret_success; then + : +else + exitcode=1 + echo as_func_ret_success failed. +fi + +if as_func_ret_failure; then + exitcode=1 + echo as_func_ret_failure succeeded. +fi + +if ( set x; as_func_ret_success y && test x = \"\$1\" ); then + : +else + exitcode=1 + echo positional parameters were not saved. +fi + +test \$exitcode = 0") || { + echo No shell found that supports shell functions. + echo Please tell bug-autoconf@gnu.org about your system, + echo including any error possibly output before this message. + echo This can help us improve future autoconf versions. + echo Configuration will now proceed without shell functions. +} + + + + as_lineno_1=$LINENO + as_lineno_2=$LINENO + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { + + # Create $as_me.lineno as a copy of $as_myself, but with $LINENO + # uniformly replaced by the line number. The first 'sed' inserts a + # line-number line after each line using $LINENO; the second 'sed' + # does the real work. The second script uses 'N' to pair each + # line-number line with the line containing $LINENO, and appends + # trailing '-' during substitution so that $LINENO is not a special + # case at line end. + # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the + # scripts with optimization help from Paolo Bonzini. Blame Lee + # E. McMahon (1931-1989) for sed's syntax. 
:-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 + { (exit 1); exit 1; }; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in +-n*) + case `echo 'x\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + *) ECHO_C='\c';; + esac;; +*) + ECHO_N='-n';; +esac +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -p'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -p' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -p' + fi +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 
2>/dev/null; then + as_mkdir_p=: +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +if test -x / >/dev/null 2>&1; then + as_test_x='test -x' +else + if ls -dL / >/dev/null 2>&1; then + as_ls_L_option=L + else + as_ls_L_option= + fi + as_test_x=' + eval sh -c '\'' + if test -d "$1"; then + test -d "$1/."; + else + case $1 in + -*)set "./$1";; + esac; + case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in + ???[sx]*):;;*)false;;esac;fi + '\'' sh + ' +fi +as_executable_p=$as_test_x + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + + +exec 7<&0 &1 + +# Name of the host. +# hostname on some systems (SVR3.2, Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_clean_files= +ac_config_libobj_dir=. +LIBOBJS= +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= +SHELL=${CONFIG_SHELL-/bin/sh} + +# Identity of this package. 
+PACKAGE_NAME= +PACKAGE_TARNAME= +PACKAGE_VERSION= +PACKAGE_STRING= +PACKAGE_BUGREPORT= + +ac_unique_file="src/mod_python.c" +ac_subst_vars='LTLIBOBJS +LIBOBJS +LEX +MAX_LOCKS +MUTEX_DIR +TEST_MOD_PYTHON_SO +TEST_SERVER_ROOT +ARCHFLAGS +INCLUDES +LDLIBS +PYTHON_BIN +STATIC +AP_SRC_GRP +AP_SRC_OWN +AP_SRC +APR_VERSION +HTTPD_VERSION +HTTPD +SOLARIS_HACKS +SYSCONFDIR +LIBEXECDIR +ALL +DSO +APXS +SET_MAKE +INSTALL_DATA +INSTALL_SCRIPT +INSTALL_PROGRAM +AR +OBJEXT +EXEEXT +ac_ct_CC +CPPFLAGS +LDFLAGS +CFLAGS +CC +target_alias +host_alias +build_alias +LIBS +ECHO_T +ECHO_N +ECHO_C +DEFS +mandir +localedir +libdir +psdir +pdfdir +dvidir +htmldir +infodir +docdir +oldincludedir +includedir +localstatedir +sharedstatedir +sysconfdir +datadir +datarootdir +libexecdir +sbindir +bindir +program_transform_name +prefix +exec_prefix +PACKAGE_BUGREPORT +PACKAGE_STRING +PACKAGE_VERSION +PACKAGE_TARNAME +PACKAGE_NAME +PATH_SEPARATOR +SHELL' +ac_subst_files='' +ac_user_opts=' +enable_option_checking +with_apxs +with_apache +with_python +with_mutex_dir +with_max_locks +with_flex +' + ac_precious_vars='build_alias +host_alias +target_alias +CC +CFLAGS +LDFLAGS +LIBS +CPPFLAGS' + + +# Initialize some variables set by options. +ac_init_help= +ac_init_version=false +ac_unrecognized_opts= +ac_unrecognized_sep= +# The variables have the same names as the options, with +# dashes changed to underlines. +cache_file=/dev/null +exec_prefix=NONE +no_create= +no_recursion= +prefix=NONE +program_prefix=NONE +program_suffix=NONE +program_transform_name=s,x,x, +silent= +site= +srcdir= +verbose= +x_includes=NONE +x_libraries=NONE + +# Installation directory options. +# These are left unexpanded so users can "make install exec_prefix=/foo" +# and all the variables that are supposed to be based on exec_prefix +# by default will actually change. +# Use braces instead of parens because sh, perl, etc. also accept them. +# (The list follows the same order as the GNU Coding Standards.) 
+bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datarootdir='${prefix}/share' +datadir='${datarootdir}' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +includedir='${prefix}/include' +oldincludedir='/usr/include' +docdir='${datarootdir}/doc/${PACKAGE}' +infodir='${datarootdir}/info' +htmldir='${docdir}' +dvidir='${docdir}' +pdfdir='${docdir}' +psdir='${docdir}' +libdir='${exec_prefix}/lib' +localedir='${datarootdir}/locale' +mandir='${datarootdir}/man' + +ac_prev= +ac_dashdash= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval $ac_prev=\$ac_option + ac_prev= + continue + fi + + case $ac_option in + *=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; + *) ac_optarg=yes ;; + esac + + # Accept the important Cygnus configure options, so we can diagnose typos. + + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=*) + datadir=$ac_optarg ;; + + -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ + | --dataroo | --dataro | --datar) + ac_prev=datarootdir ;; + -datarootdir=* | 
--datarootdir=* | --datarootdi=* | --datarootd=* \ + | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) + datarootdir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + { $as_echo "$as_me: error: invalid feature name: $ac_useropt" >&2 + { (exit 1); exit 1; }; } + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=no ;; + + -docdir | --docdir | --docdi | --doc | --do) + ac_prev=docdir ;; + -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) + docdir=$ac_optarg ;; + + -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) + ac_prev=dvidir ;; + -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) + dvidir=$ac_optarg ;; + + -enable-* | --enable-*) + ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. 
+ expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + { $as_echo "$as_me: error: invalid feature name: $ac_useropt" >&2 + { (exit 1); exit 1; }; } + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=\$ac_optarg ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. + with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) + ac_prev=htmldir ;; + -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ + | --ht=*) + htmldir=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | 
--libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localedir | --localedir | --localedi | --localed | --locale) + ac_prev=localedir ;; + -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) + localedir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst | --locals) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) + localstatedir=$ac_optarg ;; + + -mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. 
+ with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | --program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | 
--program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) + ac_prev=pdfdir ;; + -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) + pdfdir=$ac_optarg ;; + + -psdir | --psdir | --psdi | --psd | --ps) + ac_prev=psdir ;; + -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) + psdir=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* 
| --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + { $as_echo "$as_me: error: invalid package name: $ac_useropt" >&2 + { (exit 1); exit 1; }; } + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=\$ac_optarg ;; + + -without-* | --without-*) + ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + { $as_echo "$as_me: error: invalid package name: $ac_useropt" >&2 + { (exit 1); exit 1; }; } + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=no ;; + + --x) + # Obsolete; use --with-x. 
+ with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) { $as_echo "$as_me: error: unrecognized option: $ac_option +Try \`$0 --help' for more information." >&2 + { (exit 1); exit 1; }; } + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null && + { $as_echo "$as_me: error: invalid variable name: $ac_envvar" >&2 + { (exit 1); exit 1; }; } + eval $ac_envvar=\$ac_optarg + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. + $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option} + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + { $as_echo "$as_me: error: missing argument to $ac_option" >&2 + { (exit 1); exit 1; }; } +fi + +if test -n "$ac_unrecognized_opts"; then + case $enable_option_checking in + no) ;; + fatal) { $as_echo "$as_me: error: unrecognized options: $ac_unrecognized_opts" >&2 + { (exit 1); exit 1; }; } ;; + *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; + esac +fi + +# Check all directory arguments for consistency. 
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ + libdir localedir mandir +do + eval ac_val=\$$ac_var + # Remove trailing slashes. + case $ac_val in + */ ) + ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` + eval $ac_var=\$ac_val;; + esac + # Be sure to have absolute directory names. + case $ac_val in + [\\/$]* | ?:[\\/]* ) continue;; + NONE | '' ) case $ac_var in *prefix ) continue;; esac;; + esac + { $as_echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 + { (exit 1); exit 1; }; } +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + $as_echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host. + If a cross compiler is detected then cross compile mode will be used." >&2 + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +ac_pwd=`pwd` && test -n "$ac_pwd" && +ac_ls_di=`ls -di .` && +ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || + { $as_echo "$as_me: error: working directory cannot be determined" >&2 + { (exit 1); exit 1; }; } +test "X$ac_ls_di" = "X$ac_pwd_ls_di" || + { $as_echo "$as_me: error: pwd does not report name of working directory" >&2 + { (exit 1); exit 1; }; } + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then the parent directory. 
+ ac_confdir=`$as_dirname -- "$as_myself" || +$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_myself" : 'X\(//\)[^/]' \| \ + X"$as_myself" : 'X\(//\)$' \| \ + X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_myself" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r "$srcdir/$ac_unique_file"; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r "$srcdir/$ac_unique_file"; then + test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." + { $as_echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2 + { (exit 1); exit 1; }; } +fi +ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" +ac_abs_confdir=`( + cd "$srcdir" && test -r "./$ac_unique_file" || { $as_echo "$as_me: error: $ac_msg" >&2 + { (exit 1); exit 1; }; } + pwd)` +# When building in place, set srcdir=. +if test "$ac_abs_confdir" = "$ac_pwd"; then + srcdir=. +fi +# Remove unnecessary trailing slashes from srcdir. +# Double slashes in file names in object file debugging info +# mess up M-x gdb in Emacs. +case $srcdir in +*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; +esac +for ac_var in $ac_precious_vars; do + eval ac_env_${ac_var}_set=\${${ac_var}+set} + eval ac_env_${ac_var}_value=\$${ac_var} + eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} + eval ac_cv_env_${ac_var}_value=\$${ac_var} +done + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures this package to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. 
See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. + +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. 
+ +Fine tuning of the installation directories: + --bindir=DIR user executables [EPREFIX/bin] + --sbindir=DIR system admin executables [EPREFIX/sbin] + --libexecdir=DIR program executables [EPREFIX/libexec] + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] + --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] + --datadir=DIR read-only architecture-independent data [DATAROOTDIR] + --infodir=DIR info documentation [DATAROOTDIR/info] + --localedir=DIR locale-dependent data [DATAROOTDIR/locale] + --mandir=DIR man documentation [DATAROOTDIR/man] + --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE] + --htmldir=DIR html documentation [DOCDIR] + --dvidir=DIR dvi documentation [DOCDIR] + --pdfdir=DIR pdf documentation [DOCDIR] + --psdir=DIR ps documentation [DOCDIR] +_ACEOF + + cat <<\_ACEOF +_ACEOF +fi + +if test -n "$ac_init_help"; then + + cat <<\_ACEOF + +Optional Packages: + --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] + --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) + --with-apxs=NAME name of the apxs executable [apxs] +--with-apache=DIR Path to Apache sources +--with-python=PATH Path to specific Python binary +--with-mutex-dir=DIR Mutex directory +--with-max-locks=INTEGER Maximum number of locks +--with-flex=PATH Path to specific flex binary. + Flex Version 2.5.31 or greater is required to regenerate psp_parser.c + from psp_parse.l. A prepared psp_parser.c file is included with the + source, so you will only need flex if you make changes to psp_parser.l + See the README for more information. + +Some influential environment variables: + CC C compiler command + CFLAGS C compiler flags + LDFLAGS linker flags, e.g. 
-L if you have libraries in a + nonstandard directory + LIBS libraries to pass to the linker, e.g. -l + CPPFLAGS C/C++/Objective C preprocessor flags, e.g. -I if + you have headers in a nonstandard directory + +Use these variables to override the choices made by `configure' or to help +it to find libraries and programs with nonstandard names/locations. + +_ACEOF +ac_status=$? +fi + +if test "$ac_init_help" = "recursive"; then + # If there are subdirs, report their specific --help. + for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue + test -d "$ac_dir" || + { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || + continue + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + cd "$ac_dir" || { ac_status=$?; continue; } + # Check for guested configure. 
+ if test -f "$ac_srcdir/configure.gnu"; then + echo && + $SHELL "$ac_srcdir/configure.gnu" --help=recursive + elif test -f "$ac_srcdir/configure"; then + echo && + $SHELL "$ac_srcdir/configure" --help=recursive + else + $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi || ac_status=$? + cd "$ac_pwd" || { ac_status=$?; break; } + done +fi + +test -n "$ac_init_help" && exit $ac_status +if $ac_init_version; then + cat <<\_ACEOF +configure +generated by GNU Autoconf 2.63 + +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, +2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +This configure script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it. +_ACEOF + exit +fi +cat >config.log <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by $as_me, which was +generated by GNU Autoconf 2.63. Invocation command line was + + $ $0 $@ + +_ACEOF +exec 5>>config.log +{ +cat <<_ASUNAME +## --------- ## +## Platform. 
## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + $as_echo "PATH: $as_dir" +done +IFS=$as_save_IFS + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. 
+ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) + ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;; + 2) + ac_configure_args1="$ac_configure_args1 '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + ac_configure_args="$ac_configure_args '$ac_arg'" + ;; + esac + done +done +$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; } +$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; } + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Use '\'' to represent an apostrophe within the trap. +# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. +trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + cat <<\_ASBOX +## ---------------- ## +## Cache variables. 
## +## ---------------- ## +_ASBOX + echo + # The following way of writing the cache mishandles newlines in values, +( + for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:$LINENO: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) $as_unset $ac_var ;; + esac ;; + esac + done + (set) 2>&1 | + case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + sed -n \ + "s/'\''/'\''\\\\'\'''\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" + ;; #( + *) + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) + echo + + cat <<\_ASBOX +## ----------------- ## +## Output variables. ## +## ----------------- ## +_ASBOX + echo + for ac_var in $ac_subst_vars + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then + cat <<\_ASBOX +## ------------------- ## +## File substitutions. ## +## ------------------- ## +_ASBOX + echo + for ac_var in $ac_subst_files + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then + cat <<\_ASBOX +## ----------- ## +## confdefs.h. 
## +## ----------- ## +_ASBOX + echo + cat confdefs.h + echo + fi + test "$ac_signal" != 0 && + $as_echo "$as_me: caught signal $ac_signal" + $as_echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core core.conftest.* && + rm -f -r conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status +' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -f -r conftest* confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer an explicitly selected file to automatically selected ones. +ac_site_file1=NONE +ac_site_file2=NONE +if test -n "$CONFIG_SITE"; then + ac_site_file1=$CONFIG_SITE +elif test "x$prefix" != xNONE; then + ac_site_file1=$prefix/share/config.site + ac_site_file2=$prefix/etc/config.site +else + ac_site_file1=$ac_default_prefix/share/config.site + ac_site_file2=$ac_default_prefix/etc/config.site +fi +for ac_site_file in "$ac_site_file1" "$ac_site_file2" +do + test "x$ac_site_file" = xNONE && continue + if test -r "$ac_site_file"; then + { $as_echo "$as_me:$LINENO: loading site script $ac_site_file" >&5 +$as_echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special + # files actually), so we avoid doing that. 
+ if test -f "$cache_file"; then + { $as_echo "$as_me:$LINENO: loading cache $cache_file" >&5 +$as_echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . "$cache_file";; + *) . "./$cache_file";; + esac + fi +else + { $as_echo "$as_me:$LINENO: creating cache $cache_file" >&5 +$as_echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. +ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { $as_echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { $as_echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + # differences in whitespace do not lead to failure. 
+ ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { $as_echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5 +$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { $as_echo "$as_me:$LINENO: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { $as_echo "$as_me:$LINENO: former value: \`$ac_old_val'" >&5 +$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} + { $as_echo "$as_me:$LINENO: current value: \`$ac_new_val'" >&5 +$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} + fi;; + esac + # Pass precious variables to config.status. + if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. 
+ *) ac_configure_args="$ac_configure_args '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + { $as_echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5 +$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} + { { $as_echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5 +$as_echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;} + { (exit 1); exit 1; }; } +fi + + + + + + + + + + + + + + + + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + +# includes +INCLUDES="-I`pwd`/src/include" + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_CC+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:$LINENO: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_ac_ct_CC+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_CC="gcc" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. +set dummy ${ac_tool_prefix}cc; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_CC+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:$LINENO: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi + + + fi +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_CC+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. 
+ shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:$LINENO: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl.exe + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_CC+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:$LINENO: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl.exe +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_ac_ct_CC+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_CC="$ac_prog" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi + + +test -z "$CC" && { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +{ { $as_echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH +See \`config.log' for more details." >&5 +$as_echo "$as_me: error: no acceptable C compiler found in \$PATH +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; }; } + +# Provide some information about the compiler. +$as_echo "$as_me:$LINENO: checking for C compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +{ (ac_try="$ac_compiler --version >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compiler --version >&5") 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } +{ (ac_try="$ac_compiler -v >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compiler -v >&5") 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (ac_try="$ac_compiler -V >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compiler -V >&5") 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +{ $as_echo "$as_me:$LINENO: checking for C compiler default output file name" >&5 +$as_echo_n "checking for C compiler default output file name... 
" >&6; } +ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + +# The possible output files: +ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" + +ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { (ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. +for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. 
+ break;; + * ) + break;; + esac +done +test "$ac_cv_exeext" = no && ac_cv_exeext= + +else + ac_file='' +fi + +{ $as_echo "$as_me:$LINENO: result: $ac_file" >&5 +$as_echo "$ac_file" >&6; } +if test -z "$ac_file"; then + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +{ { $as_echo "$as_me:$LINENO: error: C compiler cannot create executables +See \`config.log' for more details." >&5 +$as_echo "$as_me: error: C compiler cannot create executables +See \`config.log' for more details." >&2;} + { (exit 77); exit 77; }; }; } +fi + +ac_exeext=$ac_cv_exeext + +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ $as_echo "$as_me:$LINENO: checking whether the C compiler works" >&5 +$as_echo_n "checking whether the C compiler works... " >&6; } +# FIXME: These cross compiler hacks should be removed for Autoconf 3.0 +# If not cross compiling, check that we can run a simple program. +if test "$cross_compiling" != yes; then + if { ac_try='./$ac_file' + { (case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +{ { $as_echo "$as_me:$LINENO: error: cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details." >&5 +$as_echo "$as_me: error: cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details." 
>&2;} + { (exit 1); exit 1; }; }; } + fi + fi +fi +{ $as_echo "$as_me:$LINENO: result: yes" >&5 +$as_echo "yes" >&6; } + +rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ $as_echo "$as_me:$LINENO: checking whether we are cross compiling" >&5 +$as_echo_n "checking whether we are cross compiling... " >&6; } +{ $as_echo "$as_me:$LINENO: result: $cross_compiling" >&5 +$as_echo "$cross_compiling" >&6; } + +{ $as_echo "$as_me:$LINENO: checking for suffix of executables" >&5 +$as_echo_n "checking for suffix of executables... " >&6; } +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + break;; + * ) break;; + esac +done +else + { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +{ { $as_echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details." >&5 +$as_echo "$as_me: error: cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details." 
>&2;} + { (exit 1); exit 1; }; }; } +fi + +rm -f conftest$ac_cv_exeext +{ $as_echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5 +$as_echo "$ac_cv_exeext" >&6; } + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +{ $as_echo "$as_me:$LINENO: checking for suffix of object files" >&5 +$as_echo_n "checking for suffix of object files... " >&6; } +if test "${ac_cv_objext+set}" = set; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +{ { $as_echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile +See \`config.log' for more details." >&5 +$as_echo "$as_me: error: cannot compute suffix of object files: cannot compile +See \`config.log' for more details." 
>&2;} + { (exit 1); exit 1; }; }; } +fi + +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_objext" >&5 +$as_echo "$ac_cv_objext" >&6; } +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +{ $as_echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5 +$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } +if test "${ac_cv_c_compiler_gnu+set}" = set; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_compiler_gnu=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_compiler_gnu=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5 +$as_echo "$ac_cv_c_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GCC=yes +else + GCC= +fi +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +{ $as_echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5 +$as_echo_n "checking whether $CC accepts -g... 
" >&6; } +if test "${ac_cv_prog_cc_g+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no + CFLAGS="-g" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_prog_cc_g=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + CFLAGS="" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + : +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_c_werror_flag=$ac_save_c_werror_flag + CFLAGS="-g" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_prog_cc_g=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_c_werror_flag=$ac_save_c_werror_flag +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5 +$as_echo "$ac_cv_prog_cc_g" >&6; } +if test "$ac_test_CFLAGS" = set; then + CFLAGS=$ac_save_CFLAGS +elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then + CFLAGS="-g -O2" + else + CFLAGS="-g" + fi +else + if test "$GCC" = yes; then + CFLAGS="-O2" + else + CFLAGS= + fi +fi +{ $as_echo "$as_me:$LINENO: checking for $CC option to accept ISO C89" >&5 +$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } +if test "${ac_cv_prog_cc_c89+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_cv_prog_cc_c89=no +ac_save_CC=$CC +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. 
*/ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +#include +#include +#include +/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ +struct buf { int x; }; +FILE * (*rcsopen) (struct buf *, struct stat *, int); +static char *e (p, i) + char **p; + int i; +{ + return p[i]; +} +static char *f (char * (*g) (char **, int), char **p, ...) +{ + char *s; + va_list v; + va_start (v,p); + s = g (p, va_arg (v,int)); + va_end (v); + return s; +} + +/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has + function prototypes and stuff, but not '\xHH' hex character constants. + These don't provoke an error unfortunately, instead are silently treated + as 'x'. The following induces an error, until -std is added to get + proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an + array size at least. It's necessary to write '\x00'==0 to get something + that's true only with -std. */ +int osf4_cc_array ['\x00' == 0 ? 1 : -1]; + +/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters + inside strings and character constants. */ +#define FOO(x) 'x' +int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_prog_cc_c89=$ac_arg +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { $as_echo "$as_me:$LINENO: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:$LINENO: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c89" >&5 +$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; +esac + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +for ac_prog in ar aal +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_prog_AR+set}" = set; then + $as_echo_n "(cached) " >&6 +else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_AR="$ac_prog" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { $as_echo "$as_me:$LINENO: result: $AR" >&5 +$as_echo "$AR" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AR" && break +done +test -n "$AR" || AR="ar" + +ac_aux_dir= +for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do + if test -f "$ac_dir/install-sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f "$ac_dir/install.sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f "$ac_dir/shtool"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + { { $as_echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" >&5 +$as_echo "$as_me: error: cannot find install-sh or install.sh in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" >&2;} + { (exit 1); exit 1; }; } +fi + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. +ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. +ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. + + +# Find a good install program. We prefer a C program (faster), +# so one script is as good as another. 
But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +# Reject install programs that cannot install multiple files. +{ $as_echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5 +$as_echo_n "checking for a BSD-compatible install... " >&6; } +if test -z "$INSTALL"; then +if test "${ac_cv_path_install+set}" = set; then + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in + ./ | .// | /cC/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. 
+ : + else + rm -rf conftest.one conftest.two conftest.dir + echo one > conftest.one + echo two > conftest.two + mkdir conftest.dir + if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && + test -s conftest.one && test -s conftest.two && + test -s conftest.dir/conftest.one && + test -s conftest.dir/conftest.two + then + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + fi + done + done + ;; +esac + +done +IFS=$as_save_IFS + +rm -rf conftest.one conftest.two conftest.dir + +fi + if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. Don't cache a + # value for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + INSTALL=$ac_install_sh + fi +fi +{ $as_echo "$as_me:$LINENO: result: $INSTALL" >&5 +$as_echo "$INSTALL" >&6; } + +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. +test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' + +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + +{ $as_echo "$as_me:$LINENO: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } +set x ${MAKE-make} +ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` +if { as_var=ac_cv_prog_make_${ac_make}_set; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.make <<\_ACEOF +SHELL = /bin/sh +all: + @echo '@@@%%%=$(MAKE)=@@@%%%' +_ACEOF +# GNU make sometimes prints "make[1]: Entering...", which would confuse us. 
+case `${MAKE-make} -f conftest.make 2>/dev/null` in + *@@@%%%=?*=@@@%%%*) + eval ac_cv_prog_make_${ac_make}_set=yes;; + *) + eval ac_cv_prog_make_${ac_make}_set=no;; +esac +rm -f conftest.make +fi +if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then + { $as_echo "$as_me:$LINENO: result: yes" >&5 +$as_echo "yes" >&6; } + SET_MAKE= +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } + SET_MAKE="MAKE=${MAKE-make}" +fi + + + + +{ $as_echo "$as_me:$LINENO: checking for main in -lm" >&5 +$as_echo_n "checking for main in -lm... " >&6; } +if test "${ac_cv_lib_m_main+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lm $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + + +int +main () +{ +return main (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + ac_cv_lib_m_main=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_m_main=no +fi + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_m_main" >&5 +$as_echo "$ac_cv_lib_m_main" >&6; } +if test "x$ac_cv_lib_m_main" = x""yes; then + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBM 1 +_ACEOF + + LIBS="-lm $LIBS" + +fi + + + +{ $as_echo "$as_me:$LINENO: checking for an ANSI C-conforming const" >&5 +$as_echo_n "checking for an ANSI C-conforming const... " >&6; } +if test "${ac_cv_c_const+set}" = set; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ +/* FIXME: Include the comments suggested by Paul. */ +#ifndef __cplusplus + /* Ultrix mips cc rejects this. */ + typedef int charset[2]; + const charset cs; + /* SunOS 4.1.1 cc rejects this. */ + char const *const *pcpcc; + char **ppc; + /* NEC SVR4.0.2 mips cc rejects this. */ + struct point {int x, y;}; + static struct point const zero = {0,0}; + /* AIX XL C 1.02.0.0 rejects this. + It does not let you subtract one const X* pointer from another in + an arm of an if-expression whose if-part is not a constant + expression */ + const char *g = "string"; + pcpcc = &g + (g ? g-g : 0); + /* HPUX 7.0 cc rejects these. */ + ++pcpcc; + ppc = (char**) pcpcc; + pcpcc = (char const *const *) ppc; + { /* SCO 3.2v4 cc rejects this. */ + char *t; + char const *s = 0 ? (char *) 0 : (char const *) 0; + + *t++ = 0; + if (s) return 0; + } + { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. 
*/ + int x[] = {25, 17}; + const int *foo = &x[0]; + ++foo; + } + { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */ + typedef const int *iptr; + iptr p = 0; + ++p; + } + { /* AIX XL C 1.02.0.0 rejects this saying + "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ + struct s { int j; const int *ap[3]; }; + struct s *b; b->j = 5; + } + { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ + const int foo = 10; + if (!foo) return 0; + } + return !cs[0] && !zero.x; +#endif + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_c_const=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_c_const=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_c_const" >&5 +$as_echo "$ac_cv_c_const" >&6; } +if test $ac_cv_c_const = no; then + +cat >>confdefs.h <<\_ACEOF +#define const /**/ +_ACEOF + +fi + + +### humor lowers blood pressure +{ $as_echo "$as_me:$LINENO: checking your blood pressure" >&5 +$as_echo_n "checking your blood pressure... " >&6; } +{ $as_echo "$as_me:$LINENO: result: a bit high, but we can proceed" >&5 +$as_echo "a bit high, but we can proceed" >&6; } + +## The goal is to find apxs +{ $as_echo "$as_me:$LINENO: checking whether apxs is available..." >&5 +$as_echo "$as_me: checking whether apxs is available..." 
>&6;} + + + + +# check for --with-apxs + +# Check whether --with-apxs was given. +if test "${with_apxs+set}" = set; then + withval=$with_apxs; APXS="$with_apxs" +fi + + +if test -z "${APXS}"; then + for ac_prog in apxs2 apxs +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_path_APXS+set}" = set; then + $as_echo_n "(cached) " >&6 +else + case $APXS in + [\\/]* | ?:[\\/]*) + ac_cv_path_APXS="$APXS" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_dummy="$PATH:/usr/local/apache/bin:/usr/sbin" +for as_dir in $as_dummy +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_path_APXS="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + + ;; +esac +fi +APXS=$ac_cv_path_APXS +if test -n "$APXS"; then + { $as_echo "$as_me:$LINENO: result: $APXS" >&5 +$as_echo "$APXS" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$APXS" && break +done + +fi + +# if apxs was still not found, then no DSO + + + + + + +if test -z "$APXS"; then + { $as_echo "$as_me:$LINENO: WARNING: **** apxs was not found, DSO compilation will not be available." >&5 +$as_echo "$as_me: WARNING: **** apxs was not found, DSO compilation will not be available." >&2;} + { $as_echo "$as_me:$LINENO: WARNING: **** You can use --with-apxs to specify where your apxs is." >&5 +$as_echo "$as_me: WARNING: **** You can use --with-apxs to specify where your apxs is." 
>&2;} + DSO="no_dso" + ALL="static" +else + DSO="do_dso" + ALL="dso" + + # check Apache version + { $as_echo "$as_me:$LINENO: checking Apache version" >&5 +$as_echo_n "checking Apache version... " >&6; } + HTTPD="`${APXS} -q SBINDIR`/`${APXS} -q TARGET`" + HTTPD_VERSION=`$HTTPD -v | awk '/version/ {print $3}' | awk -F/ '{print $2}' | awk '{print $1}'` + APR_VERSION=`${APXS} -q APR_VERSION` + { $as_echo "$as_me:$LINENO: result: $HTTPD_VERSION" >&5 +$as_echo "$HTTPD_VERSION" >&6; } + + # make sure version begins with 2 + if test -z "`echo $HTTPD_VERSION | egrep \^2`"; then + { { $as_echo "$as_me:$LINENO: error: This version of mod_python only works with Apache 2. The one we have ($HTTPD) seems to be $HTTPD_VERSION." >&5 +$as_echo "$as_me: error: This version of mod_python only works with Apache 2. The one we have ($HTTPD) seems to be $HTTPD_VERSION." >&2;} + { (exit 1); exit 1; }; } + fi + + # determine LIBEXEC + { $as_echo "$as_me:$LINENO: checking for Apache libexec directory" >&5 +$as_echo_n "checking for Apache libexec directory... " >&6; } + LIBEXECDIR=`${APXS} -q LIBEXECDIR` + { $as_echo "$as_me:$LINENO: result: $LIBEXECDIR" >&5 +$as_echo "$LIBEXECDIR" >&6; } + + # determine INCLUDES + { $as_echo "$as_me:$LINENO: checking for Apache include directory" >&5 +$as_echo_n "checking for Apache include directory... " >&6; } + AP_INCLUDES="-I`${APXS} -q INCLUDEDIR`" + { $as_echo "$as_me:$LINENO: result: $AP_INCLUDES" >&5 +$as_echo "$AP_INCLUDES" >&6; } + + if test "`uname`" = "SunOS"; then + { $as_echo "$as_me:$LINENO: checking for gcc on Solaris possible missing _eprintf problem" >&5 +$as_echo_n "checking for gcc on Solaris possible missing _eprintf problem... 
" >&6; } + if test "$CC" = "gcc"; then + SOLARIS_HACKS="_eprintf.o _floatdidf.o _muldi3.o" + fi + { $as_echo "$as_me:$LINENO: result: \"done\"" >&5 +$as_echo "\"done\"" >&6; } + fi + +fi + +# check for --with-apache + + + +## static is disabled, thus no --with-apache +##AC_MSG_CHECKING(for --with-apache) + +# Check whether --with-apache was given. +if test "${with_apache+set}" = set; then + withval=$with_apache; + + # temporarily disable static on 2.0 until I figure out how to + # do it right + { { $as_echo "$as_me:$LINENO: error: Sorry, --with-apache (static compilation) is not supported at this time!" >&5 +$as_echo "$as_me: error: Sorry, --with-apache (static compilation) is not supported at this time!" >&2;} + { (exit 1); exit 1; }; } + + AP_SRC=`cd $withval; pwd` + + if test ! -f "$AP_SRC/include/httpd.h"; then + { { $as_echo "$as_me:$LINENO: error: $withval does not look like an Apache 2.0 source directory." >&5 +$as_echo "$as_me: error: $withval does not look like an Apache 2.0 source directory." >&2;} + { (exit 1); exit 1; }; } + fi + + { $as_echo "$as_me:$LINENO: result: $AP_SRC" >&5 +$as_echo "$AP_SRC" >&6; } + AP_INCLUDES="-I${AP_SRC}/src/include -I${AP_SRC}/src/os/unix" + + # note who owns the apache source directory + AP_SRC_OWN="`ls -ld $AP_SRC | awk '{print $3}'`" + AP_SRC_GRP="`ls -ld $AP_SRC | awk '{print $4}'`" + +fi + +##AC_MSG_RESULT(no)) + + +if test -z "$AP_SRC"; then +## AC_MSG_WARN([**** No apache sources specified, static compilation will not be available.]) +## AC_MSG_WARN([**** You can use --with-apache to specify where your Apache sources are.]) + STATIC="no_static" +else + STATIC="do_static" +fi + +if test "$STATIC" = "no_static" -a "$DSO" = "no_dso"; then + { { $as_echo "$as_me:$LINENO: error: Neither static nor DSO option available, there is no point in continuing." >&5 +$as_echo "$as_me: error: Neither static nor DSO option available, there is no point in continuing." 
>&2;} + { (exit 1); exit 1; }; } +fi + + +{ $as_echo "$as_me:$LINENO: checking for --with-python" >&5 +$as_echo_n "checking for --with-python... " >&6; } + +# Check whether --with-python was given. +if test "${with_python+set}" = set; then + withval=$with_python; + PYTHON_BIN="$withval" + { $as_echo "$as_me:$LINENO: result: $PYTHON_BIN" >&5 +$as_echo "$PYTHON_BIN" >&6; } + +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# check for Python executable +if test -z "$PYTHON_BIN"; then + # Extract the first word of "python", so it can be a program name with args. +set dummy python; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_path_PYTHON_BIN+set}" = set; then + $as_echo_n "(cached) " >&6 +else + case $PYTHON_BIN in + [\\/]* | ?:[\\/]*) + ac_cv_path_PYTHON_BIN="$PYTHON_BIN" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_path_PYTHON_BIN="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + + ;; +esac +fi +PYTHON_BIN=$ac_cv_path_PYTHON_BIN +if test -n "$PYTHON_BIN"; then + { $as_echo "$as_me:$LINENO: result: $PYTHON_BIN" >&5 +$as_echo "$PYTHON_BIN" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi + + + if test -z "$PYTHON_BIN"; then + { { $as_echo "$as_me:$LINENO: error: python binary not found in path" >&5 +$as_echo "$as_me: error: python binary not found in path" >&2;} + { (exit 1); exit 1; }; } + fi +fi + +# find out python version +{ $as_echo "$as_me:$LINENO: checking Python version" >&5 +$as_echo_n "checking Python version... 
" >&6; } +PyVERSION=`$PYTHON_BIN -c 'import sys; print(sys.version[:3])'` +PyMAJVERSION=`$PYTHON_BIN -c 'import sys; print(sys.version[:1])'` +PyMINVERSION=`$PYTHON_BIN -c 'import sys; print(sys.version.split(".")[1])'` +{ $as_echo "$as_me:$LINENO: result: $PyVERSION" >&5 +$as_echo "$PyVERSION" >&6; } + +# make sure Python version is >= 2.6 for 2 and >= 3.3 for 3 +if test "$PyMAJVERSION" -lt "2"; then + { { $as_echo "$as_me:$LINENO: error: This version of mod_python only works with Python major version 2 or higher. The one you have seems to be $PyVERSION." >&5 +$as_echo "$as_me: error: This version of mod_python only works with Python major version 2 or higher. The one you have seems to be $PyVERSION." >&2;} + { (exit 1); exit 1; }; } +fi +if test "$PyMAJVERSION" -eq "2"; then + if test "$PyMINVERSION" -lt "6"; then + { { $as_echo "$as_me:$LINENO: error: This version of mod_python only works with Python 2.x version 2.6 or higher. The one you have seems to be $PyVERSION." >&5 +$as_echo "$as_me: error: This version of mod_python only works with Python 2.x version 2.6 or higher. The one you have seems to be $PyVERSION." >&2;} + { (exit 1); exit 1; }; } + fi +fi +if test "$PyMAJVERSION" -eq "3"; then + if test "$PyMINVERSION" -lt "3"; then + { { $as_echo "$as_me:$LINENO: error: This version of mod_python only works with Python 3.x version 3.3 or higher. The one you have seems to be $PyVERSION." >&5 +$as_echo "$as_me: error: This version of mod_python only works with Python 3.x version 3.3 or higher. The one you have seems to be $PyVERSION." 
>&2;} + { (exit 1); exit 1; }; } + fi +fi + +# calculate compiler options +CPPFLAGS1=`${PYTHON_BIN} -c 'from distutils import sysconfig; \ + print("-I" + sysconfig.get_config_var("INCLUDEPY"))'` + +CPPFLAGS2=`${PYTHON_BIN} -c 'from distutils import sysconfig; \ + print(" ".join(filter(lambda x: x.startswith("-D"), \ + sysconfig.get_config_var("CFLAGS").split())))'` + +CPPFLAGS="${CPPFLAGS1} ${CPPFLAGS2}" + + + +PYTHONFRAMEWORKDIR=`${PYTHON_BIN} -c 'from distutils import sysconfig; \ + print(sysconfig.get_config_var("PYTHONFRAMEWORKDIR"))'` +PYTHONFRAMEWORKPREFIX=`${PYTHON_BIN} -c 'from distutils import sysconfig; \ + print(sysconfig.get_config_var("PYTHONFRAMEWORKPREFIX"))'` +PYTHONFRAMEWORK=`${PYTHON_BIN} -c 'from distutils import sysconfig; \ + print(sysconfig.get_config_var("PYTHONFRAMEWORK"))'` + +if test "${PYTHONFRAMEWORKDIR}" = "no-framework"; then + # this directory may contain the .so library, our preference, list 1st + LDFLAGS1=`${PYTHON_BIN} -c 'import distutils.sysconfig; \ + print("-L" + distutils.sysconfig.get_config_var("LIBDIR"))'` + LDFLAGS2=`${PYTHON_BIN} -c 'import distutils.sysconfig; \ + print("-L" + distutils.sysconfig.get_python_lib(plat_specific=1, \ + standard_lib=1) +"/config")'` + LDFLAGS="${LDFLAGS1} ${LDFLAGS2}" + + if test "$PyMAJVERSION" -eq "3"; then + LDLIBS1="-lpython${PyMAJVERSION}" + else + LDLIBS1="-lpython${PyVERSION}" + fi + LDLIBS2=`${PYTHON_BIN} -c 'from distutils import sysconfig; \ + print(sysconfig.get_config_var("LIBS"))'` + + LDLIBS="${LDLIBS1} ${LDLIBS2}" +else + LDFLAGS1="-Wl,-F${PYTHONFRAMEWORKPREFIX} -framework ${PYTHONFRAMEWORK}" + + STRING="${PYTHONFRAMEWORKDIR}/Versions/${PyVERSION}/${PYTHONFRAMEWORK}" + LDFLAGS2=`${PYTHON_BIN} -c "from distutils import sysconfig; \ + print(sysconfig.get_config_var(\"LINKFORSHARED\").replace( \ + \"${STRING}\", ''))"` + + LDFLAGS="${LDFLAGS1} ${LDFLAGS2}" + + LDLIBS=`${PYTHON_BIN} -c 'from distutils import sysconfig; \ + print(sysconfig.get_config_var("LIBS"))'` +fi + +CFLAGS="" 
+ARCHFLAGS="" +if test -x /usr/bin/lipo; then + LDFLAGS3="" + ARCHITECTURES=`/usr/bin/lipo -info $HTTPD | sed -e 's/.*://'` + for ARCH in $ARCHITECTURES; do + CFLAGS="${CFLAGS} -Wc,'-arch ${ARCH}'" + LDFLAGS3="${LDFLAGS3} -arch ${ARCH}" + ARCHFLAGS="${ARCHFLAGS} -arch ${ARCH}" + done + LDFLAGS="${LDFLAGS3} ${LDFLAGS}" +fi + + + + + + + + + +# this for the test.py script + +TEST_SERVER_ROOT="`pwd`/test" + +TEST_MOD_PYTHON_SO="`pwd`/src/mod_python.so" + +# configure the MUTEX_DIR for location of mutex locks + +{ $as_echo "$as_me:$LINENO: checking for --with-mutex-dir" >&5 +$as_echo_n "checking for --with-mutex-dir... " >&6; } + +# Check whether --with-mutex-dir was given. +if test "${with_mutex_dir+set}" = set; then + withval=$with_mutex_dir; + MUTEX_DIR="$withval" + { $as_echo "$as_me:$LINENO: result: $MUTEX_DIR" >&5 +$as_echo "$MUTEX_DIR" >&6; } + +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi + + +if test -z "$MUTEX_DIR"; then + MUTEX_DIR="/tmp" +fi +# TODO - check if MUTEX_DIR is an absolute path +{ $as_echo "$as_me:$LINENO: result: Using MUTEX_DIR $MUTEX_DIR" >&5 +$as_echo "Using MUTEX_DIR $MUTEX_DIR" >&6; } + +# configure the MAX_LOCKS for number of mutex locks + +{ $as_echo "$as_me:$LINENO: checking for --with-max-locks" >&5 +$as_echo_n "checking for --with-max-locks... " >&6; } + +# Check whether --with-max-locks was given. +if test "${with_max_locks+set}" = set; then + withval=$with_max_locks; + MAX_LOCKS="$withval" + { $as_echo "$as_me:$LINENO: result: $MAX_LOCKS" >&5 +$as_echo "$MAX_LOCKS" >&6; } + +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi + + +if test -z "$MAX_LOCKS"; then + MAX_LOCKS="8" +fi +{ $as_echo "$as_me:$LINENO: result: Using $MAX_LOCKS MAX_LOCKS." >&5 +$as_echo "Using $MAX_LOCKS MAX_LOCKS." 
>&6; } + +# Check for correct flex version +# Requires flex 2.5.31 for reentrant support +# See README for more details + + +{ $as_echo "$as_me:$LINENO: checking for --with-flex" >&5 +$as_echo_n "checking for --with-flex... " >&6; } + +# Check whether --with-flex was given. +if test "${with_flex+set}" = set; then + withval=$with_flex; + LEX="$withval" + { $as_echo "$as_me:$LINENO: result: $LEX" >&5 +$as_echo "$LEX" >&6; } + +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# check for flex executable +if test -z "$LEX"; then + # Extract the first word of "flex", so it can be a program name with args. +set dummy flex; ac_word=$2 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if test "${ac_cv_path_LEX+set}" = set; then + $as_echo_n "(cached) " >&6 +else + case $LEX in + [\\/]* | ?:[\\/]*) + ac_cv_path_LEX="$LEX" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_path_LEX="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + + ;; +esac +fi +LEX=$ac_cv_path_LEX +if test -n "$LEX"; then + { $as_echo "$as_me:$LINENO: result: $LEX" >&5 +$as_echo "$LEX" >&6; } +else + { $as_echo "$as_me:$LINENO: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi + +if test "$LEX" && test -x "$LEX"; then + { $as_echo "$as_me:$LINENO: result: found $LEX, we'll use this. Use --with-flex to specify another." >&5 +$as_echo "found $LEX, we'll use this. Use --with-flex to specify another." >&6; } + + { $as_echo "$as_me:$LINENO: checking flex version" >&5 +$as_echo_n "checking flex version... 
" >&6; } + FlexVERSION=`$LEX --version | sed 's/version//g' | awk '/flex/ {print $2}'` + Flex_MAJOR=`echo $FlexVERSION| awk -F . '{print $1}'` + Flex_MINOR=`echo $FlexVERSION| awk -F . '{print $2}'` + Flex_PATCH=`echo $FlexVERSION| awk -F . '{print $3}'` + + if test "$Flex_MAJOR" -eq "2" && test "$Flex_MINOR" -eq "5" && test "$Flex_PATCH" -ge "31"; then + { $as_echo "$as_me:$LINENO: result: $FlexVERSION. Good" >&5 +$as_echo "$FlexVERSION. Good" >&6; } + else + { $as_echo "$as_me:$LINENO: WARNING: Flex version $FlexVERSION found. + Version 2.5.31 or greater is required. You can generally ignore this + warning unless you need to regenerate psp_parser.c from psp_parse.l. + If you do need regenerate psp_parser.c, use --with-flex to specify the + location of the correct flex version. See the README for more information." >&5 +$as_echo "$as_me: WARNING: Flex version $FlexVERSION found. + Version 2.5.31 or greater is required. You can generally ignore this + warning unless you need to regenerate psp_parser.c from psp_parse.l. + If you do need regenerate psp_parser.c, use --with-flex to specify the + location of the correct flex version. See the README for more information." >&2;} + fi + +else + { $as_echo "$as_me:$LINENO: WARNING: flex $LEX not found + You can generally ignore this warning unless you need to regenerate + psp_parser.c from psp_parse.l. If you do need regenerate psp_parser.c, + use --with-flex to specify the location of flex. + See the README for more information." >&5 +$as_echo "$as_me: WARNING: flex $LEX not found + You can generally ignore this warning unless you need to regenerate + psp_parser.c from psp_parse.l. If you do need regenerate psp_parser.c, + use --with-flex to specify the location of flex. + See the README for more information." 
>&2;} +fi + +ac_config_files="$ac_config_files Makefile src/Makefile Doc/Makefile src/include/mod_python.h test/Makefile dist/setup.py dist/Makefile scripts/Makefile" + +ac_config_files="$ac_config_files scripts/mod_python" + + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. +( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:$LINENO: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) $as_unset $ac_var ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes (double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \). 
+ sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + test "x$cache_file" != "x/dev/null" && + { $as_echo "$as_me:$LINENO: updating cache $cache_file" >&5 +$as_echo "$as_me: updating cache $cache_file" >&6;} + cat confcache >$cache_file + else + { $as_echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5 +$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +# Transform confdefs.h into DEFS. +# Protect against shell expansion while executing Makefile rules. +# Protect against Makefile macro expansion. +# +# If the first sed substitution is executed (which looks for macros that +# take arguments), then branch to the quote section. Otherwise, +# look for a macro that doesn't take arguments. +ac_script=' +:mline +/\\$/{ + N + s,\\\n,, + b mline +} +t clear +:clear +s/^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\)/-D\1=\2/g +t quote +s/^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)/-D\1=\2/g +t quote +b any +:quote +s/[ `~#$^&*(){}\\|;'\''"<>?]/\\&/g +s/\[/\\&/g +s/\]/\\&/g +s/\$/$$/g +H +:any +${ + g + s/^\n// + s/\n/ /g + p +} +' +DEFS=`sed -n "$ac_script" confdefs.h` + + +ac_libobjs= +ac_ltlibobjs= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. 
+ ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext" + ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + + +: ${CONFIG_STATUS=./config.status} +ac_write_fail=0 +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ $as_echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} +cat >$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false +SHELL=\${CONFIG_SHELL-$SHELL} +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +## --------------------- ## +## M4sh Initialization. ## +## --------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; +esac + +fi + + + + +# PATH needs CR +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. 
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + +# Support unset when possible. +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + as_unset=unset +else + as_unset=false +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +case $0 in + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break +done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + { (exit 1); exit 1; } +fi + +# Work around bugs in pre-3.0 UWIN ksh. +for as_var in ENV MAIL MAILPATH +do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# Required to use basename. +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + + +# Name of the executable. +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# CDPATH. +$as_unset CDPATH + + + + as_lineno_1=$LINENO + as_lineno_2=$LINENO + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { + + # Create $as_me.lineno as a copy of $as_myself, but with $LINENO + # uniformly replaced by the line number. The first 'sed' inserts a + # line-number line after each line using $LINENO; the second 'sed' + # does the real work. The second script uses 'N' to pair each + # line-number line with the line containing $LINENO, and appends + # trailing '-' during substitution so that $LINENO is not a special + # case at line end. 
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the + # scripts with optimization help from Paolo Bonzini. Blame Lee + # E. McMahon (1931-1989) for sed's syntax. :-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 + { (exit 1); exit 1; }; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in +-n*) + case `echo 'x\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + *) ECHO_C='\c';; + esac;; +*) + ECHO_N='-n';; +esac +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -p'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! 
-f conf$$.exe || + as_ln_s='cp -p' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -p' + fi +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 2>/dev/null; then + as_mkdir_p=: +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +if test -x / >/dev/null 2>&1; then + as_test_x='test -x' +else + if ls -dL / >/dev/null 2>&1; then + as_ls_L_option=L + else + as_ls_L_option= + fi + as_test_x=' + eval sh -c '\'' + if test -d "$1"; then + test -d "$1/."; + else + case $1 in + -*)set "./$1";; + esac; + case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in + ???[sx]*):;;*)false;;esac;fi + '\'' sh + ' +fi +as_executable_p=$as_test_x + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 + +# Save the log message, to keep $[0] and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by $as_me, which was +generated by GNU Autoconf 2.63. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + +case $ac_config_files in *" +"*) set x $ac_config_files; shift; ac_config_files=$*;; +esac + + + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# Files that config.status was made for. +config_files="$ac_config_files" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +ac_cs_usage="\ +\`$as_me' instantiates files from templates according to the +current configuration. + +Usage: $0 [OPTION]... [FILE]... 
+ + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + +Configuration files: +$config_files + +Report bugs to ." + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_cs_version="\\ +config.status +configured by $0, generated by GNU Autoconf 2.63, + with options \\"`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\" + +Copyright (C) 2008 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='$ac_pwd' +srcdir='$srcdir' +INSTALL='$INSTALL' +test -n "\$AWK" || AWK=awk +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. 
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + CONFIG_FILES="$CONFIG_FILES '$ac_optarg'" + ac_need_defaults=false;; + --he | --h | --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) { $as_echo "$as_me: error: unrecognized option: $1 +Try \`$0 --help' for more information." >&2 + { (exit 1); exit 1; }; } ;; + + *) ac_config_targets="$ac_config_targets $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +if \$ac_cs_recheck; then + set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift + \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + +# Handling of arguments. 
+for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;; + "Doc/Makefile") CONFIG_FILES="$CONFIG_FILES Doc/Makefile" ;; + "src/include/mod_python.h") CONFIG_FILES="$CONFIG_FILES src/include/mod_python.h" ;; + "test/Makefile") CONFIG_FILES="$CONFIG_FILES test/Makefile" ;; + "dist/setup.py") CONFIG_FILES="$CONFIG_FILES dist/setup.py" ;; + "dist/Makefile") CONFIG_FILES="$CONFIG_FILES dist/Makefile" ;; + "scripts/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/Makefile" ;; + "scripts/mod_python") CONFIG_FILES="$CONFIG_FILES scripts/mod_python" ;; + + *) { { $as_echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5 +$as_echo "$as_me: error: invalid argument: $ac_config_target" >&2;} + { (exit 1); exit 1; }; };; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= + trap 'exit_status=$? + { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status +' 0 + trap '{ (exit 1); exit 1; }' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. 
+ +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -n "$tmp" && test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || +{ + $as_echo "$as_me: cannot create a temporary directory in ." >&2 + { (exit 1); exit 1; } +} + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + + +ac_cr=' ' +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$tmp/subs1.awk" && +_ACEOF + + +{ + echo "cat >conf$$subs.awk <<_ACEOF" && + echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && + echo "_ACEOF" +} >conf$$subs.sh || + { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 +$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} + { (exit 1); exit 1; }; } +ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'` +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + . ./conf$$subs.sh || + { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 +$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} + { (exit 1); exit 1; }; } + + ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` + if test $ac_delim_n = $ac_delim_num; then + break + elif $ac_last_try; then + { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 +$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} + { (exit 1); exit 1; }; } + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" + fi +done +rm -f conf$$subs.sh + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +cat >>"\$tmp/subs1.awk" <<\\_ACAWK && +_ACEOF +sed -n ' +h +s/^/S["/; s/!.*/"]=/ +p +g +s/^[^!]*!// +:repl +t repl +s/'"$ac_delim"'$// +t delim +:nl +h +s/\(.\{148\}\).*/\1/ +t more1 +s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ +p +n +b repl +:more1 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t nl +:delim +h +s/\(.\{148\}\).*/\1/ +t more2 +s/["\\]/\\&/g; s/^/"/; s/$/"/ +p +b +:more2 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t delim +' >$CONFIG_STATUS || ac_write_fail=1 +rm -f conf$$subs.awk +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACAWK +cat >>"\$tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \ + || { { $as_echo "$as_me:$LINENO: error: could not setup config files machinery" >&5 +$as_echo "$as_me: error: could not setup config files machinery" >&2;} + { (exit 1); exit 1; }; } +_ACEOF + +# VPATH may cause trouble with some makes, so we remove $(srcdir), +# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). 
+if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=/{ +s/:*\$(srcdir):*/:/ +s/:*\${srcdir}:*/:/ +s/:*@srcdir@:*/:/ +s/^\([^=]*=[ ]*\):*/\1/ +s/:*$// +s/^[^=]*=[ ]*$// +}' +fi + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +fi # test -n "$CONFIG_FILES" + + +eval set X " :F $CONFIG_FILES " +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) { { $as_echo "$as_me:$LINENO: error: invalid tag $ac_tag" >&5 +$as_echo "$as_me: error: invalid tag $ac_tag" >&2;} + { (exit 1); exit 1; }; };; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. + test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + { { $as_echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5 +$as_echo "$as_me: error: cannot find input file: $ac_f" >&2;} + { (exit 1); exit 1; }; };; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + ac_file_inputs="$ac_file_inputs '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. 
$configure_input" + { $as_echo "$as_me:$LINENO: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. + case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$tmp/stdin" \ + || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 +$as_echo "$as_me: error: could not create $ac_file" >&2;} + { (exit 1); exit 1; }; } ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + { as_dir="$ac_dir" + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || { { $as_echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5 +$as_echo "$as_me: error: cannot create directory $as_dir" >&2;} + { (exit 1); exit 1; }; }; } + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. 
+ac_datarootdir_hack=; ac_datarootdir_seen= + +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p +' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; +esac +_ACEOF + +# Neutralize VPATH when `$srcdir' = `.'. +# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_sed_extra="$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \ + || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 +$as_echo "$as_me: error: could not create $ac_file" >&2;} + { (exit 1); exit 1; }; } + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' 
"$tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined." >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined." >&2;} + + rm -f "$tmp/stdin" + case $ac_file in + -) cat "$tmp/out" && rm -f "$tmp/out";; + *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";; + esac \ + || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 +$as_echo "$as_me: error: could not create $ac_file" >&2;} + { (exit 1); exit 1; }; } + ;; + + + + esac + + + case $ac_file$ac_mode in + "scripts/mod_python":F) chmod +x scripts/mod_python ;; + + esac +done # for ac_tag + + +{ (exit 0); exit 0; } +_ACEOF +chmod +x $CONFIG_STATUS +ac_clean_files=$ac_clean_files_save + +test $ac_write_fail = 0 || + { { $as_echo "$as_me:$LINENO: error: write failure creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: error: write failure creating $CONFIG_STATUS" >&2;} + { (exit 1); exit 1; }; } + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? 
= 1, which + # would make configure fail if this is the last instruction. + $ac_cs_success || { (exit 1); exit 1; } +fi +if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then + { $as_echo "$as_me:$LINENO: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 +$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} +fi + diff --git a/configure.in b/configure.in new file mode 100644 index 0000000..64f13d8 --- /dev/null +++ b/configure.in @@ -0,0 +1,353 @@ + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + # + # Originally developed by Gregory Trubetskoy. + # + +dnl Process this file with autoconf to produce a configure script. +AC_INIT(src/mod_python.c) + +# includes +INCLUDES="-I`pwd`/src/include" + +dnl Checks for programs. +AC_PROG_CC +AC_SUBST(AR) +AC_CHECK_PROGS(AR, ar aal, ar) +AC_PROG_INSTALL +AC_PROG_MAKE_SET + +dnl Replace `main' with a function in -lm: +AC_CHECK_LIB(m, main) + +dnl Checks for header files. + +dnl Checks for typedefs, structures, and compiler characteristics. 
+AC_C_CONST + +### humor lowers blood pressure +AC_MSG_CHECKING(your blood pressure) +AC_MSG_RESULT([a bit high, but we can proceed]) + +## The goal is to find apxs +AC_CHECKING(whether apxs is available) +AC_SUBST(APXS) +AC_SUBST(DSO) +AC_SUBST(ALL) + +# check for --with-apxs +AC_ARG_WITH(apxs, AC_HELP_STRING([--with-apxs=NAME], + [name of the apxs executable [[apxs]]]), + [APXS="$with_apxs"]) + +if test -z "${APXS}"; then + AC_PATH_PROGS(APXS, apxs2 apxs, [], + [$PATH:/usr/local/apache/bin:/usr/sbin]) +fi + +# if apxs was still not found, then no DSO +AC_SUBST(LIBEXECDIR) +AC_SUBST(SYSCONFDIR) +AC_SUBST(SOLARIS_HACKS) +AC_SUBST(HTTPD) +AC_SUBST(HTTPD_VERSION) +AC_SUBST(APR_VERSION) +if test -z "$APXS"; then + AC_MSG_WARN([**** apxs was not found, DSO compilation will not be available.]) + AC_MSG_WARN([**** You can use --with-apxs to specify where your apxs is.]) + DSO="no_dso" + ALL="static" +else + DSO="do_dso" + ALL="dso" + + # check Apache version + AC_MSG_CHECKING(Apache version) + HTTPD="`${APXS} -q SBINDIR`/`${APXS} -q TARGET`" + HTTPD_VERSION=`$HTTPD -v | awk '/version/ {print $3}' | awk -F/ '{print $2}' | awk '{print $1}'` + APR_VERSION=`${APXS} -q APR_VERSION` + AC_MSG_RESULT($HTTPD_VERSION) + + # make sure version begins with 2 + if test -z "`echo $HTTPD_VERSION | egrep \^2`"; then + AC_MSG_ERROR([This version of mod_python only works with Apache 2. 
The one we have ($HTTPD) seems to be $HTTPD_VERSION.]) + fi + + # determine LIBEXEC + AC_MSG_CHECKING(for Apache libexec directory) + LIBEXECDIR=`${APXS} -q LIBEXECDIR` + AC_MSG_RESULT($LIBEXECDIR) + + # determine INCLUDES + AC_MSG_CHECKING([for Apache include directory]) + AP_INCLUDES="-I`${APXS} -q INCLUDEDIR`" + AC_MSG_RESULT($AP_INCLUDES) + + dnl Small hack to work around _eprintf.o problem on Solaris + if test "`uname`" = "SunOS"; then + AC_MSG_CHECKING([for gcc on Solaris possible missing _eprintf problem]) + if test "$CC" = "gcc"; then + SOLARIS_HACKS="_eprintf.o _floatdidf.o _muldi3.o" + fi + AC_MSG_RESULT("done") + fi + +fi + +# check for --with-apache +AC_SUBST(AP_SRC) +AC_SUBST(AP_SRC_OWN) +AC_SUBST(AP_SRC_GRP) +## static is disabled, thus no --with-apache +##AC_MSG_CHECKING(for --with-apache) +AC_ARG_WITH(apache, [--with-apache=DIR Path to Apache sources], +[ + + # temporarily disable static on 2.0 until I figure out how to + # do it right + AC_MSG_ERROR([Sorry, --with-apache (static compilation) is not supported at this time!]) + + AP_SRC=`cd $withval; pwd` + + dnl Make sure this looks like Apache source + if test ! 
-f "$AP_SRC/include/httpd.h"; then + AC_MSG_ERROR([$withval does not look like an Apache 2.0 source directory.]) + fi + + AC_MSG_RESULT($AP_SRC) + AP_INCLUDES="-I${AP_SRC}/src/include -I${AP_SRC}/src/os/unix" + + # note who owns the apache source directory + AP_SRC_OWN="`ls -ld $AP_SRC | awk '{print $3}'`" + AP_SRC_GRP="`ls -ld $AP_SRC | awk '{print $4}'`" +],) +##AC_MSG_RESULT(no)) + +AC_SUBST(STATIC) +if test -z "$AP_SRC"; then +## AC_MSG_WARN([**** No apache sources specified, static compilation will not be available.]) +## AC_MSG_WARN([**** You can use --with-apache to specify where your Apache sources are.]) + STATIC="no_static" +else + STATIC="do_static" +fi + +if test "$STATIC" = "no_static" -a "$DSO" = "no_dso"; then + AC_MSG_ERROR([Neither static nor DSO option available, there is no point in continuing.]) +fi + +AC_SUBST(PYTHON_BIN) +AC_MSG_CHECKING(for --with-python) +AC_ARG_WITH(python, [--with-python=PATH Path to specific Python binary], +[ + PYTHON_BIN="$withval" + AC_MSG_RESULT($PYTHON_BIN) +], +AC_MSG_RESULT(no)) + +# check for Python executable +if test -z "$PYTHON_BIN"; then + AC_PATH_PROG(PYTHON_BIN, python) + if test -z "$PYTHON_BIN"; then + AC_MSG_ERROR(python binary not found in path) + fi +fi + +# find out python version +AC_MSG_CHECKING(Python version) +PyVERSION=`$PYTHON_BIN -c ['import sys; print(sys.version[:3])'`] +PyMAJVERSION=`$PYTHON_BIN -c ['import sys; print(sys.version[:1])'`] +PyMINVERSION=`$PYTHON_BIN -c ['import sys; print(sys.version.split(".")[1])'`] +AC_MSG_RESULT($PyVERSION) + +# make sure Python version is >= 2.6 for 2 and >= 3.3 for 3 +if test "$PyMAJVERSION" -lt "2"; then + AC_MSG_ERROR([This version of mod_python only works with Python major version 2 or higher. The one you have seems to be $PyVERSION.]) +fi +if test "$PyMAJVERSION" -eq "2"; then + if test "$PyMINVERSION" -lt "6"; then + AC_MSG_ERROR([This version of mod_python only works with Python 2.x version 2.6 or higher. 
The one you have seems to be $PyVERSION.]) + fi +fi +if test "$PyMAJVERSION" -eq "3"; then + if test "$PyMINVERSION" -lt "3"; then + AC_MSG_ERROR([This version of mod_python only works with Python 3.x version 3.3 or higher. The one you have seems to be $PyVERSION.]) + fi +fi + +# calculate compiler options +CPPFLAGS1=`${PYTHON_BIN} -c 'from distutils import sysconfig; \ + print("-I" + sysconfig.get_config_var("INCLUDEPY"))'` + +CPPFLAGS2=`${PYTHON_BIN} -c 'from distutils import sysconfig; \ + print(" ".join(filter(lambda x: x.startswith("-D"), \ + sysconfig.get_config_var("CFLAGS").split())))'` + +CPPFLAGS="${CPPFLAGS1} ${CPPFLAGS2}" + +AC_SUBST(CPPFLAGS) + +PYTHONFRAMEWORKDIR=`${PYTHON_BIN} -c 'from distutils import sysconfig; \ + print(sysconfig.get_config_var("PYTHONFRAMEWORKDIR"))'` +PYTHONFRAMEWORKPREFIX=`${PYTHON_BIN} -c 'from distutils import sysconfig; \ + print(sysconfig.get_config_var("PYTHONFRAMEWORKPREFIX"))'` +PYTHONFRAMEWORK=`${PYTHON_BIN} -c 'from distutils import sysconfig; \ + print(sysconfig.get_config_var("PYTHONFRAMEWORK"))'` + +if test "${PYTHONFRAMEWORKDIR}" = "no-framework"; then + # this directory may contain the .so library, our preference, list 1st + LDFLAGS1=`${PYTHON_BIN} -c 'import distutils.sysconfig; \ + print("-L" + distutils.sysconfig.get_config_var("LIBDIR"))'` + LDFLAGS2=`${PYTHON_BIN} -c 'import distutils.sysconfig; \ + print("-L" + distutils.sysconfig.get_python_lib(plat_specific=1, \ + standard_lib=1) +"/config")'` + LDFLAGS="${LDFLAGS1} ${LDFLAGS2}" + + if test "$PyMAJVERSION" -eq "3"; then + LDLIBS1="-lpython${PyMAJVERSION}" + else + LDLIBS1="-lpython${PyVERSION}" + fi + LDLIBS2=`${PYTHON_BIN} -c 'from distutils import sysconfig; \ + print(sysconfig.get_config_var("LIBS"))'` + + LDLIBS="${LDLIBS1} ${LDLIBS2}" +else + LDFLAGS1="-Wl,-F${PYTHONFRAMEWORKPREFIX} -framework ${PYTHONFRAMEWORK}" + + STRING="${PYTHONFRAMEWORKDIR}/Versions/${PyVERSION}/${PYTHONFRAMEWORK}" + LDFLAGS2=`${PYTHON_BIN} -c "from distutils import sysconfig; \ 
+ print(sysconfig.get_config_var(\"LINKFORSHARED\").replace( \ + \"${STRING}\", ''))"` + + LDFLAGS="${LDFLAGS1} ${LDFLAGS2}" + + LDLIBS=`${PYTHON_BIN} -c 'from distutils import sysconfig; \ + print(sysconfig.get_config_var("LIBS"))'` +fi + +CFLAGS="" +ARCHFLAGS="" +if test -x /usr/bin/lipo; then + LDFLAGS3="" + ARCHITECTURES=`/usr/bin/lipo -info $HTTPD | sed -e 's/.*://'` + for ARCH in $ARCHITECTURES; do + CFLAGS="${CFLAGS} -Wc,'-arch ${ARCH}'" + LDFLAGS3="${LDFLAGS3} -arch ${ARCH}" + ARCHFLAGS="${ARCHFLAGS} -arch ${ARCH}" + done + LDFLAGS="${LDFLAGS3} ${LDFLAGS}" +fi + +AC_SUBST(CFLAGS) +AC_SUBST(LDFLAGS) +AC_SUBST(LDLIBS) + +AC_SUBST(INCLUDES) + +AC_SUBST(ARCHFLAGS) + +# this for the test.py script +AC_SUBST(TEST_SERVER_ROOT) +TEST_SERVER_ROOT="`pwd`/test" +AC_SUBST(TEST_MOD_PYTHON_SO) +TEST_MOD_PYTHON_SO="`pwd`/src/mod_python.so" + +# configure the MUTEX_DIR for location of mutex locks +AC_SUBST(MUTEX_DIR) +AC_MSG_CHECKING(for --with-mutex-dir) +AC_ARG_WITH(mutex-dir, [--with-mutex-dir=DIR Mutex directory], +[ + MUTEX_DIR="$withval" + AC_MSG_RESULT($MUTEX_DIR) +], +AC_MSG_RESULT(no)) + +if test -z "$MUTEX_DIR"; then + MUTEX_DIR="/tmp" +fi +# TODO - check if MUTEX_DIR is an absolute path +AC_MSG_RESULT([Using MUTEX_DIR $MUTEX_DIR]) + +# configure the MAX_LOCKS for number of mutex locks +AC_SUBST(MAX_LOCKS) +AC_MSG_CHECKING(for --with-max-locks) +AC_ARG_WITH(max-locks, [--with-max-locks=INTEGER Maximum number of locks], +[ + MAX_LOCKS="$withval" + AC_MSG_RESULT($MAX_LOCKS) +], +AC_MSG_RESULT(no)) + +if test -z "$MAX_LOCKS"; then + MAX_LOCKS="8" +fi +AC_MSG_RESULT([Using $MAX_LOCKS MAX_LOCKS.]) + +# Check for correct flex version +# Requires flex 2.5.31 for reentrant support +# See README for more details +AC_SUBST(LEX) + +AC_MSG_CHECKING(for --with-flex) +AC_ARG_WITH(flex, [--with-flex=PATH Path to specific flex binary. + Flex Version 2.5.31 or greater is required to regenerate psp_parser.c + from psp_parse.l. 
A prepared psp_parser.c file is included with the
+ source, so you will only need flex if you make changes to psp_parser.l
+ See the README for more information.],
+[
+ LEX="$withval"
+ AC_MSG_RESULT($LEX)
+],
+AC_MSG_RESULT(no))
+
+# check for flex executable
+if test -z "$LEX"; then
+ AC_PATH_PROG(LEX, flex)
+fi
+
+if test "$LEX" && test -x "$LEX"; then
+ AC_MSG_RESULT([found $LEX, we'll use this. Use --with-flex to specify another.])
+
+ AC_MSG_CHECKING(flex version)
+ FlexVERSION=`$LEX --version | sed 's/version//g' | awk '/flex/ {print $2}'`
+ Flex_MAJOR=`echo $FlexVERSION| awk -F . '{print $1}'`
+ Flex_MINOR=`echo $FlexVERSION| awk -F . '{print $2}'`
+ Flex_PATCH=`echo $FlexVERSION| awk -F . '{print $3}'`
+
+ if test "$Flex_MAJOR" -eq "2" && test "$Flex_MINOR" -eq "5" && test "$Flex_PATCH" -ge "31"; then
+ AC_MSG_RESULT([$FlexVERSION. Good])
+ else
+ AC_MSG_WARN([Flex version $FlexVERSION found.
+ Version 2.5.31 or greater is required. You can generally ignore this
+ warning unless you need to regenerate psp_parser.c from psp_parser.l.
+ If you do need to regenerate psp_parser.c, use --with-flex to specify the
+ location of the correct flex version. See the README for more information.])
+ fi
+
+else
+ AC_MSG_WARN([flex $LEX not found
+ You can generally ignore this warning unless you need to regenerate
+ psp_parser.c from psp_parser.l. If you do need to regenerate psp_parser.c,
+ use --with-flex to specify the location of flex. 
+ See the README for more information.]) +fi + +AC_CONFIG_FILES([Makefile src/Makefile Doc/Makefile \ + src/include/mod_python.h test/Makefile \ + dist/setup.py dist/Makefile scripts/Makefile]) +AC_CONFIG_FILES([scripts/mod_python], [chmod +x scripts/mod_python]) + +AC_OUTPUT diff --git a/dist/Makefile.in b/dist/Makefile.in new file mode 100644 index 0000000..d798adf --- /dev/null +++ b/dist/Makefile.in @@ -0,0 +1,56 @@ + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + # + # Originally developed by Gregory Trubetskoy. 
+ #
+ # $Id$
+ #
+
+PYTHON_BIN=@PYTHON_BIN@
+
+build: mod_python src
+ @cd src; $(MAKE) psp_parser.c
+ $(PYTHON_BIN) setup.py build
+
+# this one requires at least python 2.3
+windist: mod_python.so
+ $(PYTHON_BIN) setup.py bdist_wininst --install-script=win32_postinstall.py
+
+install: install_py_lib
+
+# this may require root privileges
+install_py_lib: mod_python src
+ @cd src; $(MAKE) psp_parser.c
+ if test -z "$(DESTDIR)" ; then \
+ $(PYTHON_BIN) setup.py install --optimize 2 --force ; \
+ else \
+ $(PYTHON_BIN) setup.py install --optimize 2 --force --root $(DESTDIR) ; \
+ fi
+
+mod_python.so:
+ @echo "Please place a WIN32 compiled mod_python.so in this directory"
+ exit 1
+
+mod_python:
+ ln -s ../lib/python/mod_python mod_python
+
+src:
+ ln -s ../src src
+
+clean:
+ rm -rf mod_python build dist
+
+distclean:
+ rm -rf mod_python src build dist mod_python.so setup.py Makefile MANIFEST MANIFEST.in
diff --git a/dist/README b/dist/README
new file mode 100644
index 0000000..9285d99
--- /dev/null
+++ b/dist/README
@@ -0,0 +1,5 @@
+$Id$
+
+This directory contains files necessary for building
+mod_python distributions.
+
diff --git a/dist/build_installer.bat b/dist/build_installer.bat
new file mode 100644
index 0000000..97eea27
--- /dev/null
+++ b/dist/build_installer.bat
@@ -0,0 +1,58 @@
+@echo off
+rem Copyright 2004 Apache Software Foundation
+rem
+rem Licensed under the Apache License, Version 2.0 (the "License");
+rem you may not use this file except in compliance with the License.
+rem You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+rem
+rem Originally developed by Gregory Trubetskoy. 
+rem +rem $Id$ +rem +rem This script builds the installer for Windows + +rem Test for APACHESRC +if "%APACHESRC%"=="" GOTO NOAPACHESRC +if not exist "%APACHESRC%\include" GOTO BADAPACHESRC + +rem Cleanup +rmdir /s /q build +del /s ..\src\*.obj ..\src\*.lib ..\src\*.exp ..\src\*.res + +rem Build +python setup.py.in bdist_wininst --install-script win32_postinstall.py +GOTO END + +rem Use this instead of the previous line to create a debug build +rem For this you need a Python debug build. The .py files will be installed +rem directly in the Python debug build's site-packages. The .so file will remain +rem in build/lib.win32-2.4, so you'll have to make sure your testconf.py file +rem points to it instead of the copy that may already reside in LIBEXECDIR. + +rem python_d setup.py.in build --debug install +rem GOTO END + +rem Compress the installer if possible +upx.exe --no-color --no-progress --best dist\*.exe +GOTO END + +:BADAPACHESRC +echo Currently APACHESRC points to %APACHESRC% +echo This value seems wrong as we could not find a proper +echo Apache installation here. + +:NOAPACHESRC +echo Please set the APACHESRC variable to point to your Apache setup +echo E.g. set APACHESRC=c:\apache +echo This can be a binary distribution, no need for the Apache sources. +GOTO END + +:END diff --git a/dist/setup.py.in b/dist/setup.py.in new file mode 100644 index 0000000..68581f3 --- /dev/null +++ b/dist/setup.py.in @@ -0,0 +1,215 @@ + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. 
+ # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + # + # + # $Id: setup.py.in 475516 2006-11-16 01:12:40Z grahamd $ + +from distutils.core import setup, Extension + +import sys +import re +import os.path +if sys.version[0] == '2': + from commands import getoutput +else: + from subprocess import getoutput + +try: + __file__ +except NameError: + __file__ = '.' + +def getmp_rootdir(): + """gets the root directory of the mod_python source tree...""" + return os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + +def getmp_srcdir(): + """gets the src subdirectory of the mod_python source tree...""" + return os.path.join(getmp_rootdir(), 'src') + +def getmp_includedir(): + """gets the src subdirectory of the mod_python source tree...""" + return os.path.join(getmp_rootdir(), 'src', 'include') + +def getmp_mpdir(): + """gets the mod_python dir""" + return os.path.join(getmp_rootdir(), 'lib', 'python', 'mod_python') + +def getconfigure_option(option_name): + """gets an option from the config.status file""" + config_status_file = os.path.join(getmp_rootdir(), 'config.status') + if not os.path.exists(config_status_file): + raise AssertionError("config.status not found in expected location (%s)" % config_status_file) + header = open(config_status_file, 'r') + r = re.compile(r's,\s*@%s@,\s*(?P[^,]+),\s*' % (option_name)) + for line in header.readlines(): + m = r.search(line) + if m is not None: + return m.group('OPTION_STRING') + raise AssertionError("unable to find @%s@ definition in %s", (option_name, config_status_file)) + +def getmp_version(): + return getoutput('./version.sh') + +def 
getapxs_location(): + """finds the location of apxs from the config.status file""" + return getconfigure_option("APXS") + +def getapxs_option(option): + APXS = getapxs_location() + return getoutput("%s -q %s" % (APXS, option)) + +def getapache_srcdir(): + """returns apache src directory""" + return os.getenv("APACHESRC") + +def getapache_includedir(): + """returns apache include directory""" + apache_srcdir = getapache_srcdir() + if apache_srcdir is None: + return getapxs_option("INCLUDEDIR") + else: + return os.path.join(getapache_srcdir(), "include") + +def getapache_libdir(): + """returns apache lib directory""" + apache_srcdir = getapache_srcdir() + if apache_srcdir is None: + return getapxs_option("LIBDIR") + else: + return os.path.join(apache_srcdir, "lib") + +def generate_version_py(): + with open(os.path.join(getmp_mpdir(), 'version.py'), 'w') as version_py: + version_py.write('# THIS FILE IS AUTO-GENERATED BY setup.py\n\n') + version_py.write('version = "%s"\n' % getmp_version()) + version_py.write('\n# Some build-time constants:\n') + version_py.write('HTTPD = "@HTTPD@"\n') + version_py.write('HTTPD_VERSION = "@HTTPD_VERSION@"\n') + version_py.write('APR_VERSION = "@APR_VERSION@"\n') + version_py.write('LIBEXECDIR = "@LIBEXECDIR@"\n') + version_py.write('SYSCONFDIR = "@SYSCONFDIR@"\n') + version_py.write('TEST_MOD_PYTHON_SO = "@TEST_MOD_PYTHON_SO@" #NB: This is for test.py only\n') + version_py.write('TESTHOME = "@TEST_SERVER_ROOT@"\n') + version_py.write('PYTHON_BIN = "@PYTHON_BIN@"\n') + + +VER = getmp_version() + +# TODO: improve the intelligence here... 
+winbuild = ("bdist_wininst" in sys.argv) or (os.name == "nt") + +class PSPExtension(Extension): + """a class that helps build the PSP extension""" + def __init__(self, source_dir, include_dirs): + Extension.__init__(self, "mod_python._psp", + [os.path.join(source_dir, source_file) for source_file in + ("psp_string.c", "psp_parser.c", "_pspmodule.c")], + include_dirs=include_dirs + ) + + if winbuild: + self.define_macros.extend([('WIN32', None), ('NDEBUG', None), ('_WINDOWS', None)]) + +PSPModule = PSPExtension(getmp_srcdir(), [getmp_includedir()]) + +modpy_src_files = ("mod_python.c", "_apachemodule.c", "connobject.c", "filterobject.c", + "hlist.c", "hlistobject.c", "requestobject.c", "serverobject.c", "tableobject.c", + "util.c", "finfoobject.c") + +class finallist(list): + """this represents a list that cannot be appended to...""" + def append(self, object): + return + +class ModPyExtension(Extension): + """a class that actually builds the mod_python.so extension for Apache (yikes)""" + def __init__(self, source_dir, include_dirs, library_dirs): + if winbuild: + apr1 = 0 + for dir in library_dirs: + if os.path.exists(os.path.join(dir, 'libapr-1.lib')): + apr1 = 1 + if apr1: + libraries = ['libhttpd', 'libapr-1', 'libaprutil-1', 'ws2_32'] + else: + libraries = ['libhttpd', 'libapr', 'libaprutil', 'ws2_32'] + else: + libraries = ['apr-0', 'aprutil-0'] + + Extension.__init__(self, "mod_python_so", + sources = [os.path.join(source_dir, source_file) for source_file in modpy_src_files], + include_dirs=include_dirs, + libraries = libraries, + library_dirs=library_dirs + ) + if winbuild: + self.define_macros.extend([('WIN32', None),('NDEBUG', None),('_WINDOWS', None)]) + self.sources.append(os.path.join(source_dir, "Version.rc")) + else: + # TODO: fix this to autodetect if required... 
+ self.include_dirs.append("/usr/include/apr-0") + # this is a hack to prevent build_ext from trying to append "initmod_python" to the export symbols + self.export_symbols = finallist(self.export_symbols) + + +if winbuild: + + # build mod_python.so + ModPyModule = ModPyExtension(getmp_srcdir(), [getmp_includedir(), getapache_includedir()], [getapache_libdir()]) + + scripts = ["win32_postinstall.py"] + # put the mod_python.so file in the Python root ... + # win32_postinstall.py will pick it up from there... + # data_files = [("", [(os.path.join(getmp_srcdir(), 'Release', 'mod_python.so'))])] + data_files = [] + ext_modules = [ModPyModule, PSPModule] + +else: + + scripts = [] + data_files = [] + ext_modules = [PSPModule] + +import string +from distutils import sysconfig + +generate_version_py() + +if sys.platform == "darwin": + if not '-undefined' in sysconfig.get_config_var("LDSHARED").split(): + sysconfig._config_vars["LDSHARED"] = \ + string.replace(sysconfig.get_config_var("LDSHARED"), \ + " -bundle "," -bundle -flat_namespace -undefined suppress ") + sysconfig._config_vars["BLDSHARED"] = \ + string.replace(sysconfig.get_config_var("BLDSHARED"), \ + " -bundle "," -bundle -flat_namespace -undefined suppress ") + +setup(name="mod_python", + version=VER, + description="Apache/Python Integration", + author="Gregory Trubetskoy et al", + author_email="mod_python@modpython.org", + url="http://www.modpython.org/", + packages=["mod_python"], + package_dir={'mod_python': os.path.join(getmp_rootdir(), 'lib', 'python', 'mod_python')}, + scripts=scripts, + data_files=data_files, + ext_modules=ext_modules) + +# makes emacs go into python mode +### Local Variables: +### mode:python +### End: diff --git a/dist/version.sh b/dist/version.sh new file mode 100644 index 0000000..1060b4f --- /dev/null +++ b/dist/version.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +MPV_PATH="`dirname $0`/../src/include/mp_version.h" + +MAJ=`awk '/MP_VERSION_MAJOR/ {print $3}' $MPV_PATH` +MIN=`awk 
'/MP_VERSION_MINOR/ {print $3}' $MPV_PATH` +PCH=`awk '/MP_VERSION_PATCH/ {print $3}' $MPV_PATH` +GIT=`git describe --always` + +echo $MAJ.$MIN.$PCH-$GIT diff --git a/dist/win32_postinstall.py b/dist/win32_postinstall.py new file mode 100644 index 0000000..2a95934 --- /dev/null +++ b/dist/win32_postinstall.py @@ -0,0 +1,152 @@ + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + # + # Originally developed by Gregory Trubetskoy. 
+ # + # $Id$ + # + # this script runs at the end of windows install + + +import sys, os, shutil +import distutils.sysconfig + +def getApacheDirOptions(): + """find potential apache directories in the registry...""" + try: + import win32api, win32con + class nullregkey: + """a registry key that doesn't exist...""" + def childkey(self, subkeyname): + return nullregkey() + def subkeynames(self): + return [] + def getvalue(self, valuename): + raise AttributeError("Cannot access registry value %r: key does not exist" % (valuename)) + class regkey: + """simple wrapper for registry functions that closes keys nicely...""" + def __init__(self, parent, subkeyname): + self.key = win32api.RegOpenKey(parent, subkeyname) + def childkey(self, subkeyname): + try: + return regkey(self.key, subkeyname) + except win32api.error: + return nullregkey() + def subkeynames(self): + numsubkeys = win32api.RegQueryInfoKey(self.key)[0] + return [win32api.RegEnumKey(self.key, index) for index in range(numsubkeys)] + def getvalue(self, valuename): + try: + return win32api.RegQueryValueEx(self.key, valuename) + except win32api.error: + raise AttributeError("Cannot access registry value %r" % (valuename)) + def __del__(self): + if hasattr(self, "key"): + win32api.RegCloseKey(self.key) + except ImportError: + return {} + versions = {} + hklm_key = regkey(win32con.HKEY_LOCAL_MACHINE, "Software").childkey("Apache Group").childkey("Apache") + hkcu_key = regkey(win32con.HKEY_CURRENT_USER, "Software").childkey("Apache Group").childkey("Apache") + for apachekey in (hklm_key, hkcu_key): + for versionname in apachekey.subkeynames(): + try: + serverroot = apachekey.childkey(versionname).getvalue("ServerRoot") + except AttributeError: + continue + versions[versionname] = serverroot[0] + return versions + +def askForApacheDir(apachediroptions): + # try to ask for Apache directory + if len(apachediroptions) > 0: + # get the most recent version... 
+ versionnames = apachediroptions.keys() + versionnames.sort() + initialdir = apachediroptions[versionnames[-1]] + else: + initialdir="C:/Program Files/Apache Group/Apache2" + # TODO: let the user select the name from a list, or click browse to choose... + try: + from tkFileDialog import askdirectory + from Tkinter import Tk + root = Tk() + root.withdraw() + path = askdirectory(title="Where is Apache installed?", + initialdir=initialdir, + mustexist=1, master=root) + root.quit() + root.destroy() + return path + except ImportError: + try: + from win32com.shell import shell + pidl, displayname, imagelist = shell.SHBrowseForFolder(0, None, "Where is Apache installed?") + path = shell.SHGetPathFromIDList(pidl) + return path + except ImportError: + return "" + +# if we're called during removal, just exit +if len(sys.argv) == 1 or (len(sys.argv) > 1 and sys.argv[1] != "-remove"): + + mp = os.path.join(distutils.sysconfig.get_python_lib(), "mod_python_so.pyd") + + apachediroptions = getApacheDirOptions() + + apachedir = askForApacheDir(apachediroptions) + + if apachedir: + + # put mod_python.so there + mod_python_so_path = os.path.join(apachedir, "modules", "mod_python.so") + shutil.move(mp, mod_python_so_path) + file_created(mod_python_so_path) + + print """Important Note for Windows users, PLEASE READ!!! + + 1. This script does not attempt to modify Apache configuration, + you must do it manually: + + Edit %s, + find where other LoadModule lines are and add this: + LoadModule python_module modules/mod_python.so + + 2. Now test your installation using the instructions at this link: + http://www.modpython.org/live/current/doc-html/inst-testing.html + + """ % os.path.join(apachedir, "conf", "httpd.conf") + + else: + + print """Important Note for Windows users, PLEASE READ!!! + + 1. It appears that you do not have Tkinter installed, + which is required for a part of this installation. + Therefore you must manually take + "%s" + and copy it to your Apache modules directory. 
+ 
+    2. This script does not attempt to modify Apache configuration,
+       you must do it manually:
+
+       Edit %s,
+       find where other LoadModule lines are and add this:
+          LoadModule python_module modules/mod_python.so
+
+    3. Now test your installation using the instructions at this link:
+       http://www.modpython.org/live/current/doc-html/inst-testing.html
+
+    """ % (mp, os.path.join(apachedir, "conf", "httpd.conf"))
diff --git a/examples/gzipfilter.py b/examples/gzipfilter.py
new file mode 100644
index 0000000..7db9f28
--- /dev/null
+++ b/examples/gzipfilter.py
@@ -0,0 +1,71 @@
+#
+# Usage:
+#
+#   PythonOutputFilter gzipfilter
+#   SetOutputFilter gzipfilter
+#
+
+from mod_python import apache
+
+import os
+import sys
+import gzip
+import cStringIO
+from mod_python import apache
+
+def compress(s):
+    sio = cStringIO.StringIO()
+    f = gzip.GzipFile(mode='wb', fileobj=sio)
+    f.write(s)
+    f.close()
+    return sio.getvalue()
+
+def accepts_gzip(req):
+    if req.headers_in.has_key('accept-encoding'):
+        encodings = req.headers_in['accept-encoding']
+        return (encodings.find("gzip") != -1)
+    return 0
+
+###
+### main filter function
+###
+def outputfilter(filter):
+
+    if (filter.req.main or
+        not accepts_gzip(filter.req)):
+
+        # Presence of filter.req.main tells us that
+        # we are in a subrequest. We don't want to compress
+        # the data more than once, so we pass_on() in
+        # subrequests. We also pass_on() if the client
+        # does not accept gzip encoding, of course. 
+ + filter.pass_on() + else: + + if not filter.req.sent_bodyct: + + # the above test allows us to set the encoding once + # rather than every time the filter is invoked + + filter.req.headers_out['content-encoding'] = 'gzip' + + # loop through content, compressing + + s = filter.read() + + while s: + s = compress(s) + filter.write(s) + s = filter.read() + + if s is None: + + # this means we received an EOS, so we pass it on + # by closing the filter + + filter.close() + + + + diff --git a/install-sh b/install-sh new file mode 100644 index 0000000..c1666c3 --- /dev/null +++ b/install-sh @@ -0,0 +1,251 @@ +#!/bin/sh +# +# install - install a program, script, or datafile +# This comes from X11R5 (mit/util/scripts/install.sh). +# +# Copyright 1991 by the Massachusetts Institute of Technology +# +# Permission to use, copy, modify, distribute, and sell this software and its +# documentation for any purpose is hereby granted without fee, provided that +# the above copyright notice appear in all copies and that both that +# copyright notice and this permission notice appear in supporting +# documentation, and that the name of M.I.T. not be used in advertising or +# publicity pertaining to distribution of the software without specific, +# written prior permission. M.I.T. makes no representations about the +# suitability of this software for any purpose. It is provided "as is" +# without express or implied warranty. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# `make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. It can only install one file at a time, a restriction +# shared with many OS's install programs. + + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. 
+doit="${DOITPROG-}" + + +# put in absolute paths if you don't have them in your path; or use env. vars. + +mvprog="${MVPROG-mv}" +cpprog="${CPPROG-cp}" +chmodprog="${CHMODPROG-chmod}" +chownprog="${CHOWNPROG-chown}" +chgrpprog="${CHGRPPROG-chgrp}" +stripprog="${STRIPPROG-strip}" +rmprog="${RMPROG-rm}" +mkdirprog="${MKDIRPROG-mkdir}" + +transformbasename="" +transform_arg="" +instcmd="$cpprog" +chmodcmd="$chmodprog 0755" +chowncmd="" +chgrpcmd="" +stripcmd="" +rmcmd="$rmprog -f" +mvcmd="$mvprog" +src="" +dst="" +dir_arg="" + +while [ x"$1" != x ]; do + case $1 in + -c) instcmd="$cpprog" + shift + continue;; + + -d) dir_arg=true + shift + continue;; + + -m) chmodcmd="$chmodprog $2" + shift + shift + continue;; + + -o) chowncmd="$chownprog $2" + shift + shift + continue;; + + -g) chgrpcmd="$chgrpprog $2" + shift + shift + continue;; + + -s) stripcmd="$stripprog" + shift + continue;; + + -t=*) transformarg=`echo $1 | sed 's/-t=//'` + shift + continue;; + + -b=*) transformbasename=`echo $1 | sed 's/-b=//'` + shift + continue;; + + *) if [ x"$src" = x ] + then + src=$1 + else + # this colon is to work around a 386BSD /bin/sh bug + : + dst=$1 + fi + shift + continue;; + esac +done + +if [ x"$src" = x ] +then + echo "install: no input file specified" + exit 1 +else + true +fi + +if [ x"$dir_arg" != x ]; then + dst=$src + src="" + + if [ -d $dst ]; then + instcmd=: + chmodcmd="" + else + instcmd=mkdir + fi +else + +# Waiting for this to be detected by the "$instcmd $src $dsttmp" command +# might cause directories to be created, which would be especially bad +# if $src (and thus $dsttmp) contains '*'. 
+ + if [ -f $src -o -d $src ] + then + true + else + echo "install: $src does not exist" + exit 1 + fi + + if [ x"$dst" = x ] + then + echo "install: no destination specified" + exit 1 + else + true + fi + +# If destination is a directory, append the input filename; if your system +# does not like double slashes in filenames, you may need to add some logic + + if [ -d $dst ] + then + dst="$dst"/`basename $src` + else + true + fi +fi + +## this sed command emulates the dirname command +dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` + +# Make sure that the destination directory exists. +# this part is taken from Noah Friedman's mkinstalldirs script + +# Skip lots of stat calls in the usual case. +if [ ! -d "$dstdir" ]; then +defaultIFS=' +' +IFS="${IFS-${defaultIFS}}" + +oIFS="${IFS}" +# Some sh's can't handle IFS=/ for some reason. +IFS='%' +set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'` +IFS="${oIFS}" + +pathcomp='' + +while [ $# -ne 0 ] ; do + pathcomp="${pathcomp}${1}" + shift + + if [ ! -d "${pathcomp}" ] ; + then + $mkdirprog "${pathcomp}" + else + true + fi + + pathcomp="${pathcomp}/" +done +fi + +if [ x"$dir_arg" != x ] +then + $doit $instcmd $dst && + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi +else + +# If we're going to rename the final executable, determine the name now. + + if [ x"$transformarg" = x ] + then + dstfile=`basename $dst` + else + dstfile=`basename $dst $transformbasename | + sed $transformarg`$transformbasename + fi + +# don't allow the sed command to completely eliminate the filename + + if [ x"$dstfile" = x ] + then + dstfile=`basename $dst` + else + true + fi + +# Make a temp file name in the proper directory. 
+ + dsttmp=$dstdir/#inst.$$# + +# Move or copy the file name to the temp name + + $doit $instcmd $src $dsttmp && + + trap "rm -f ${dsttmp}" 0 && + +# and set any options; do chmod last to preserve setuid bits + +# If any of these fail, we abort the whole thing. If we want to +# ignore errors from any of these, just make sure not to ignore +# errors from the above "$doit $instcmd $src $dsttmp" command. + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && + +# Now rename the file to the real destination. + + $doit $rmcmd -f $dstdir/$dstfile && + $doit $mvcmd $dsttmp $dstdir/$dstfile + +fi && + + +exit 0 diff --git a/lib/python/mod_python/Cookie.py b/lib/python/mod_python/Cookie.py new file mode 100644 index 0000000..7205722 --- /dev/null +++ b/lib/python/mod_python/Cookie.py @@ -0,0 +1,394 @@ + # vim: set sw=4 expandtab : + # + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); you + # may not use this file except in compliance with the License. You + # may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + # implied. See the License for the specific language governing + # permissions and limitations under the License. + # + # Originally developed by Gregory Trubetskoy. + # + +""" + +This module contains classes to support HTTP State Management +Mechanism, also known as Cookies. 
The classes provide simple +ways for creating, parsing and digitally signing cookies, as +well as the ability to store simple Python objects in Cookies +(using marshalling). + +The behaviour of the classes is designed to be most useful +within mod_python applications. + +The current state of HTTP State Management standardization is +rather unclear. It appears that the de-facto standard is the +original Netscape specification, even though already two RFC's +have been put out (RFC2109 (1997) and RFC2965 (2000)). The +RFC's add a couple of useful features (e.g. using Max-Age instead +of Expires, but my limited tests show that Max-Age is ignored +by the two browsers tested (IE and Safari). As a result of this, +perhaps trying to be RFC-compliant (by automatically providing +Max-Age and Version) could be a waste of cookie space... + +""" + +import sys +import time +import re +import hmac +import marshal +import base64 + +PY2 = sys.version[0] == '2' + +class CookieError(Exception): + pass + +class metaCookie(type): + + def __new__(cls, clsname, bases, clsdict): + + _valid_attr = ( + "version", "path", "domain", "secure", + "comment", "max_age", + # RFC 2965 + "commentURL", "discard", "port", + # Microsoft Extension + "httponly" ) + + # _valid_attr + property values + # (note __slots__ is a new Python feature, it + # prevents any other attribute from being set) + __slots__ = _valid_attr + ("name", "value", "_value", + "_expires", "__data__") + + clsdict["_valid_attr"] = _valid_attr + clsdict["__slots__"] = __slots__ + + def set_expires(self, value): + + if type(value) == type(""): + # if it's a string, it should be + # valid format as per Netscape spec + try: + t = time.strptime(value, "%a, %d-%b-%Y %H:%M:%S GMT") + except ValueError: + raise ValueError("Invalid expires time: %s" % value) + t = time.mktime(t) + else: + # otherwise assume it's a number + # representing time as from time.time() + t = value + value = time.strftime("%a, %d-%b-%Y %H:%M:%S GMT", + 
time.gmtime(t)) + + self._expires = "%s" % value + + def get_expires(self): + return self._expires + + clsdict["expires"] = property(fget=get_expires, fset=set_expires) + + return type.__new__(cls, clsname, bases, clsdict) + +# metaclass= workaround, see +# http://mikewatkins.ca/2008/11/29/python-2-and-3-metaclasses/#using-the-metaclass-in-python-2-x-and-3-x +_metaCookie = metaCookie('Cookie', (object, ), {}) + +class Cookie(_metaCookie): + """ + This class implements the basic Cookie functionality. Note that + unlike the Python Standard Library Cookie class, this class represents + a single cookie (not a list of Morsels). + """ + + DOWNGRADE = 0 + IGNORE = 1 + EXCEPTION = 3 + + def parse(Class, str, **kw): + """ + Parse a Cookie or Set-Cookie header value, and return + a dict of Cookies. Note: the string should NOT include the + header name, only the value. + """ + + dict = _parse_cookie(str, Class, **kw) + return dict + + parse = classmethod(parse) + + def __init__(self, name, value, **kw): + + """ + This constructor takes at least a name and value as the + arguments, as well as optionally any of allowed cookie attributes + as defined in the existing cookie standards. + """ + self.name, self.value = name, value + + for k in kw: + setattr(self, k.lower(), kw[k]) + + # subclasses can use this for internal stuff + self.__data__ = {} + + + def __str__(self): + + """ + Provides the string representation of the Cookie suitable for + sending to the browser. Note that the actual header name will + not be part of the string. + + This method makes no attempt to automatically double-quote + strings that contain special characters, even though the RFC's + dictate this. This is because doing so seems to confuse most + browsers out there. 
+ """ + + result = ["%s=%s" % (self.name, self.value)] + for name in self._valid_attr: + if hasattr(self, name): + if name in ("secure", "discard", "httponly"): + result.append(name) + else: + result.append("%s=%s" % (name, getattr(self, name))) + return "; ".join(result) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, + str(self)) + + +class SignedCookie(Cookie): + """ + This is a variation of Cookie that provides automatic + cryptographic signing of cookies and verification. It uses + the HMAC support in the Python standard library. This ensures + that the cookie has not been tamprered with on the client side. + + Note that this class does not encrypt cookie data, thus it + is still plainly visible as part of the cookie. + """ + + def parse(Class, s, secret, mismatch=Cookie.DOWNGRADE, **kw): + + dict = _parse_cookie(s, Class, **kw) + + del_list = [] + for k in dict: + c = dict[k] + try: + c.unsign(secret) + except CookieError: + if mismatch == Cookie.EXCEPTION: + raise + elif mismatch == Cookie.IGNORE: + del_list.append(k) + else: + # downgrade to Cookie + dict[k] = Cookie.parse(Cookie.__str__(c))[k] + + for k in del_list: + del dict[k] + + return dict + + parse = classmethod(parse) + + def __init__(self, name, value, secret=None, **kw): + Cookie.__init__(self, name, value, **kw) + + self.__data__["secret"] = secret + + def hexdigest(self, str): + if not self.__data__["secret"]: + raise CookieError("Cannot sign without a secret") + _hmac = hmac.new(self.__data__["secret"], self.name) + _hmac.update(str) + if PY2: + return _hmac.hexdigest() + else: + return _hmac.hexdigest().decode() + + def __str__(self): + + result = ["%s=%s%s" % (self.name, self.hexdigest(self.value), + self.value)] + for name in self._valid_attr: + if hasattr(self, name): + if name in ("secure", "discard", "httponly"): + result.append(name) + else: + result.append("%s=%s" % (name, getattr(self, name))) + return "; ".join(result) + + def unsign(self, secret): + + sig, 
val = self.value[:32], self.value[32:] + + mac = hmac.new(secret, self.name) + mac.update(val) + + if mac.hexdigest() == sig: + self.value = val + self.__data__["secret"] = secret + else: + raise CookieError("Incorrectly Signed Cookie: %s=%s" % (self.name, self.value)) + + +class MarshalCookie(SignedCookie): + + """ + This is a variation of SignedCookie that can store more than + just strings. It will automatically marshal the cookie value, + therefore any marshallable object can be used as value. + + The standard library Cookie module provides the ability to pickle + data, which is a major security problem. It is believed that unmarshalling + (as opposed to unpickling) is safe, yet we still err on the side of caution + which is why this class is a subclass of SignedCooke making sure what + we are about to unmarshal passes the digital signature test. + + Here is a link to a sugesstion that marshalling is safer than unpickling + http://groups.google.com/groups?hl=en&lr=&ie=UTF-8&selm=7xn0hcugmy.fsf%40ruckus.brouhaha.com + """ + + def parse(Class, s, secret, mismatch=Cookie.DOWNGRADE, **kw): + + dict = _parse_cookie(s, Class, **kw) + + del_list = [] + for k in dict: + c = dict[k] + try: + c.unmarshal(secret) + except CookieError: + if mismatch == Cookie.EXCEPTION: + raise + elif mismatch == Cookie.IGNORE: + del_list.append(k) + else: + # downgrade to Cookie + dict[k] = Cookie.parse(Cookie.__str__(c))[k] + + for k in del_list: + del dict[k] + + return dict + + parse = classmethod(parse) + + def __str__(self): + + m = base64.encodestring(marshal.dumps(self.value)) + # on long cookies, the base64 encoding can contain multiple lines + # separated by \n or \r\n + m = ''.join(m.split()) + + result = ["%s=%s%s" % (self.name, self.hexdigest(m), m)] + for name in self._valid_attr: + if hasattr(self, name): + if name in ("secure", "discard", "httponly"): + result.append(name) + else: + result.append("%s=%s" % (name, getattr(self, name))) + return "; ".join(result) + + def 
unmarshal(self, secret): + + self.unsign(secret) + + try: + data = base64.decodestring(self.value) + except: + raise CookieError("Cannot base64 Decode Cookie: %s=%s" % (self.name, self.value)) + + try: + self.value = marshal.loads(data) + except (EOFError, ValueError, TypeError): + raise CookieError("Cannot Unmarshal Cookie: %s=%s" % (self.name, self.value)) + + +# This is a simplified and in some places corrected +# (at least I think it is) pattern from standard lib Cookie.py + +_cookiePattern = re.compile( + r"(?x)" # Verbose pattern + r"[,\ ]*" # space/comma (RFC2616 4.2) before attr-val is eaten + r"(?P" # Start of group 'key' + r"[^;\ =]+" # anything but ';', ' ' or '=' + r")" # End of group 'key' + r"\ *(=\ *)?" # a space, then may be "=", more space + r"(?P" # Start of group 'val' + r'"(?:[^\\"]|\\.)*"' # a doublequoted string + r"|" # or + r"[^;]*" # any word or empty string + r")" # End of group 'val' + r"\s*;?" # probably ending in a semi-colon + ) + +def _parse_cookie(str, Class, names=None): + # XXX problem is we should allow duplicate + # strings + result = {} + + matchIter = _cookiePattern.finditer(str) + + for match in matchIter: + key, val = match.group("key"), match.group("val") + + # We just ditch the cookies names which start with a dollar sign since + # those are in fact RFC2965 cookies attributes. See bug [#MODPYTHON-3]. + if key[0]!='$' and names is None or key in names: + result[key] = Class(key, val) + + return result + +def add_cookie(req, cookie, value="", **kw): + """ + Sets a cookie in outgoing headers and adds a cache + directive so that caches don't cache the cookie. + """ + + # is this a cookie? 
+ if not isinstance(cookie, Cookie): + + # make a cookie + cookie = Cookie(cookie, value, **kw) + + if "Set-Cookie" not in req.headers_out: + req.headers_out.add("Cache-Control", 'no-cache="set-cookie"') + + req.headers_out.add("Set-Cookie", str(cookie)) + +def get_cookies(req, Class=Cookie, **kw): + """ + A shorthand for retrieveing and parsing cookies given + a Cookie class. The class must be one of the classes from + this module. + """ + + if "cookie" not in req.headers_in: + return {} + + cookies = req.headers_in["cookie"] + if type(cookies) == type([]): + cookies = '; '.join(cookies) + + return Class.parse(cookies, **kw) + +def get_cookie(req, name, Class=Cookie, **kw): + cookies = get_cookies(req, Class, names=[name], **kw) + if name in cookies: + return cookies[name] diff --git a/lib/python/mod_python/Session.py b/lib/python/mod_python/Session.py new file mode 100644 index 0000000..38694c2 --- /dev/null +++ b/lib/python/mod_python/Session.py @@ -0,0 +1,842 @@ + # vim: set sw=4 expandtab : + # + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); you + # may not use this file except in compliance with the License. You + # may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + # implied. See the License for the specific language governing + # permissions and limitations under the License. + # + # Originally developed by Gregory Trubetskoy. 
+ # + +import sys +PY2 = sys.version[0] == '2' + +if PY2: + import apache, Cookie + import md5 + import anydbm as dbm + from whichdb import whichdb + from cPickle import load, loads, dump, dumps + from cStringIO import StringIO +else: + from . import apache, Cookie + from hashlib import md5 + import dbm + from dbm import whichdb + from io import StringIO + from pickle import load, loads, dump, dumps + +import _apache + +import os +import stat +import time +import random +import tempfile +import traceback +import re + +COOKIE_NAME="pysid" +DFT_TIMEOUT=30*60 # 30 min +DFT_LOCK = True +CLEANUP_CHANCE=1000 # cleanups have 1 in CLEANUP_CHANCE chance + +tempdir = tempfile.gettempdir() + +def md5_hash(s): + if PY2: + return md5.new(s).hexdigest() + else: + if isinstance(s, str): + s = s.encode('latin1') + return md5(s).hexdigest() + +def _init_rnd(): + """ initialize random number generators + this is key in multithreaded env, see + python docs for random """ + + # query max number of threads + + + if _apache.mpm_query(apache.AP_MPMQ_IS_THREADED): + gennum = _apache.mpm_query(apache.AP_MPMQ_MAX_SPARE_THREADS) + else: + gennum = 10 + + # make generators + # this bit is from Python lib reference + g = random.Random(time.time()) + result = [g] + for i in range(gennum - 1): + laststate = g.getstate() + g = random.Random() + g.setstate(laststate) + result.append(g) + + return result + +rnd_gens = _init_rnd() +rnd_iter = iter(rnd_gens) + +def _get_generator(): + # get rnd_iter.next(), or start over + # if we reached the end of it + global rnd_iter + try: + return next(rnd_iter) + except StopIteration: + # the small potential for two threads doing this + # seems does not warrant use of a lock + rnd_iter = iter(rnd_gens) + return next(rnd_iter) + +def _new_sid(req): + # Make a number based on current time, pid, remote ip + # and two random ints, then hash with md5. This should + # be fairly unique and very difficult to guess. 
+ # + # WARNING + # The current implementation of _new_sid returns an + # md5 hexdigest string. To avoid a possible directory traversal + # attack in FileSession the sid is validated using + # the _check_sid() method and the compiled regex + # validate_sid_re. The sid will be accepted only if len(sid) == 32 + # and it only contains the characters 0-9 and a-f. + # + # If you change this implementation of _new_sid, make sure to also + # change the validation scheme, as well as the test_Session_illegal_sid() + # unit test in test/test.py. + # /WARNING + + t = int(time.time()*10000) + pid = os.getpid() + g = _get_generator() + rnd1 = g.randint(0, 999999999) + rnd2 = g.randint(0, 999999999) + ip = req.connection.remote_ip + + return md5_hash("%d%d%d%d%s" % (t, pid, rnd1, rnd2, ip)) + +validate_sid_re = re.compile('[0-9a-f]{32}$') + +def _check_sid(sid): + ## Check the validity of the session id + # # The sid must be 32 characters long, and consisting of the characters + # 0-9 and a-f. + # + # The sid may be passed in a cookie from the client and as such + # should not be trusted. This is particularly important in + # FileSession, where the session filename is derived from the sid. + # A sid containing '/' or '.' characters could result in a directory + # traversal attack + + return not not validate_sid_re.match(sid) + +class BaseSession(dict): + + def __init__(self, req, sid=None, secret=None, lock=1, + timeout=0): + + self._req, self._sid, self._secret = req, sid, secret + self._lock = lock + self._new = 1 + self._created = 0 + self._accessed = 0 + self._timeout = 0 + self._locked = 0 + self._invalid = 0 + + dict.__init__(self) + + config = req.get_options() + if "mod_python.session.cookie_name" in config: + session_cookie_name = config.get("mod_python.session.cookie_name", COOKIE_NAME) + else: + # For backwards compatability with versions + # of mod_python prior to 3.3. 
+ session_cookie_name = config.get("session_cookie_name", COOKIE_NAME) + + if not self._sid: + # check to see if cookie exists + if secret: + cookie = Cookie.get_cookie(req, session_cookie_name, + Class=Cookie.SignedCookie, + secret=self._secret, + mismatch=Cookie.Cookie.IGNORE) + else: + cookie = Cookie.get_cookie(req, session_cookie_name) + + if cookie: + self._sid = cookie.value + + if self._sid: + if not _check_sid(self._sid): + if sid: + # Supplied explicitly by user of the class, + # raise an exception and make the user code + # deal with it. + raise ValueError("Invalid Session ID: sid=%s" % sid) + else: + # Derived from the cookie sent by browser, + # wipe it out so it gets replaced with a + # correct value. + self._sid = None + + self.init_lock() + + if self._sid: + # attempt to load ourselves + self.lock() + if self.load(): + self._new = 0 + + if self._new: + # make a new session + if self._sid: self.unlock() # unlock old sid + self._sid = _new_sid(self._req) + self.lock() # lock new sid + Cookie.add_cookie(self._req, self.make_cookie()) + self._created = time.time() + if timeout: + self._timeout = timeout + else: + self._timeout = DFT_TIMEOUT + + self._accessed = time.time() + + # need cleanup? + if random.randint(1, CLEANUP_CHANCE) == 1: + self.cleanup() + + def make_cookie(self): + config = self._req.get_options() + if "mod_python.session.cookie_name" in config: + session_cookie_name = config.get("mod_python.session.cookie_name", COOKIE_NAME) + else: + # For backwards compatability with versions + # of mod_python prior to 3.3. 
+ session_cookie_name = config.get("session_cookie_name", COOKIE_NAME) + + if self._secret: + c = Cookie.SignedCookie(session_cookie_name, self._sid, + secret=self._secret) + else: + c = Cookie.Cookie(session_cookie_name, self._sid) + + if "mod_python.session.application_domain" in config: + c.domain = config["mod_python.session.application_domain"] + if "mod_python.session.application_path" in config: + c.path = config["mod_python.session.application_path"] + elif "ApplicationPath" in config: + # For backwards compatability with versions + # of mod_python prior to 3.3. + c.path = config["ApplicationPath"] + else: + # the path where *Handler directive was specified + dirpath = self._req.hlist.directory + if dirpath: + docroot = self._req.document_root() + c.path = dirpath[len(docroot):] + else: + c.path = '/' + + # Sometimes there is no path, e.g. when Location + # is used. When Alias or UserDir are used, then + # the path wouldn't match the URI. In those cases + # just default to '/' + if not c.path or not self._req.uri.startswith(c.path): + c.path = '/' + + return c + + def invalidate(self): + c = self.make_cookie() + c.expires = 0 + Cookie.add_cookie(self._req, c) + self.delete() + self._invalid = 1 + + def load(self): + dict = self.do_load() + if dict == None: + return 0 + + if (time.time() - dict["_accessed"]) > dict["_timeout"]: + return 0 + + self._created = dict["_created"] + self._accessed = dict["_accessed"] + self._timeout = dict["_timeout"] + self.update(dict["_data"]) + return 1 + + def save(self): + if not self._invalid: + dict = {"_data" : self.copy(), + "_created" : self._created, + "_accessed": self._accessed, + "_timeout" : self._timeout} + self.do_save(dict) + + def delete(self): + self.do_delete() + self.clear() + + def init_lock(self): + pass + + def lock(self): + if self._lock: + _apache._global_lock(self._req.server, self._sid) + self._locked = 1 + self._req.register_cleanup(unlock_session_cleanup, self) + + def unlock(self): + if self._lock 
and self._locked: + _apache._global_unlock(self._req.server, self._sid) + self._locked = 0 + + def is_new(self): + return not not self._new + + def id(self): + return self._sid + + def created(self): + return self._created + + def last_accessed(self): + return self._accessed + + def timeout(self): + return self._timeout + + def set_timeout(self, secs): + self._timeout = secs + + def cleanup(self): + self.do_cleanup() + + def __del__(self): + self.unlock() + +def unlock_session_cleanup(sess): + sess.unlock() + +########################################################################### +## DbmSession + +def dbm_cleanup(data): + dbm, server = data + _apache._global_lock(server, None, 0) + db = dbm.open(dbm, 'c') + try: + old = [] + s = db.first() + while 1: + key, val = s + dict = loads(val) + try: + if (time.time() - dict["_accessed"]) > dict["_timeout"]: + old.append(key) + except KeyError: + old.append(key) + try: + s = next(db) + except KeyError: break + + for key in old: + try: + del db[key] + except: pass + finally: + db.close() + _apache._global_unlock(server, None, 0) + +class DbmSession(BaseSession): + + def __init__(self, req, dbm=None, sid=0, secret=None, dbmtype=dbm, + timeout=0, lock=1): + + if not dbm: + opts = req.get_options() + if "mod_python.dbm_session.database_filename" in opts: + dbm = opts["mod_python.dbm_session.database_filename"] + elif "session_dbm" in opts: + # For backwards compatability with versions + # of mod_python prior to 3.3. + dbm = opts["session_dbm"] + elif "mod_python.dbm_session.database_directory" in opts: + dbm = os.path.join(opts.get('mod_python.dbm_session.database_directory', tempdir), 'mp_sess.dbm') + elif "mod_python.session.database_directory" in opts: + dbm = os.path.join(opts.get('mod_python.session.database_directory', tempdir), 'mp_sess.dbm') + else: + # For backwards compatability with versions + # of mod_python prior to 3.3. 
+ dbm = os.path.join(opts.get('session_directory', tempdir), 'mp_sess.dbm') + + self._dbmfile = dbm + self._dbmtype = dbmtype + + BaseSession.__init__(self, req, sid=sid, secret=secret, + timeout=timeout, lock=lock) + + def _set_dbm_type(self): + module = whichdb(self._dbmfile) + if module: + self._dbmtype = __import__(module) + + def _get_dbm(self): + result = self._dbmtype.open(self._dbmfile, 'c', stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP) + if self._dbmtype is dbm: + self._set_dbm_type() + return result + + def do_cleanup(self): + data = [self._dbmfile, self._req.server] + self._req.register_cleanup(dbm_cleanup, data) + self._req.log_error("DbmSession: registered database cleanup.", + apache.APLOG_NOTICE) + + def do_load(self): + _apache._global_lock(self._req.server, None, 0) + dbm = self._get_dbm() + try: + if self._sid.encode() in dbm: + return loads(dbm[self._sid.encode()]) + else: + return None + finally: + dbm.close() + _apache._global_unlock(self._req.server, None, 0) + + def do_save(self, dict): + _apache._global_lock(self._req.server, None, 0) + dbm = self._get_dbm() + try: + dbm[self._sid.encode()] = dumps(dict) + finally: + dbm.close() + _apache._global_unlock(self._req.server, None, 0) + + def do_delete(self): + _apache._global_lock(self._req.server, None, 0) + dbm = self._get_dbm() + try: + try: + del dbm[self._sid.encode()] + except KeyError: pass + finally: + dbm.close() + _apache._global_unlock(self._req.server, None, 0) + +########################################################################### +## FileSession + +DFT_FAST_CLEANUP = True +DFT_VERIFY_CLEANUP = True +DFT_GRACE_PERIOD = 240 +DFT_CLEANUP_TIME_LIMIT = 2 + +# Credits : this was initially contributed by dharana +class FileSession(BaseSession): + + def __init__(self, req, sid=0, secret=None, timeout=0, lock=1, + fast_cleanup=-1, verify_cleanup=-1): + + opts = req.get_options() + + if fast_cleanup == -1: + if 'mod_python.file_session.enable_fast_cleanup' in opts: + self._fast_cleanup 
= true_or_false(opts.get('mod_python.file_session.enable_fast_cleanup', DFT_FAST_CLEANUP)) + else: + # For backwards compatability with versions + # of mod_python prior to 3.3. + self._fast_cleanup = true_or_false(opts.get('session_fast_cleanup', DFT_FAST_CLEANUP)) + else: + self._fast_cleanup = fast_cleanup + + if verify_cleanup == -1: + if 'mod_python.file_session.verify_session_timeout' in opts: + self._verify_cleanup = true_or_false(opts.get('mod_python.file_session.verify_session_timeout', DFT_VERIFY_CLEANUP)) + else: + # For backwards compatability with versions + # of mod_python prior to 3.3. + self._verify_cleanup = true_or_false(opts.get('session_verify_cleanup', DFT_VERIFY_CLEANUP)) + else: + self._verify_cleanup = verify_cleanup + + if 'mod_python.file_session.cleanup_grace_period' in opts: + self._grace_period = int(opts.get('mod_python.file_session.cleanup_grace_period', DFT_GRACE_PERIOD)) + else: + # For backwards compatability with versions + # of mod_python prior to 3.3. + self._grace_period = int(opts.get('session_grace_period', DFT_GRACE_PERIOD)) + + if 'mod_python.file_session.cleanup_time_limit' in opts: + self._cleanup_time_limit = int(opts.get('mod_python.file_session.cleanup_time_limit',DFT_CLEANUP_TIME_LIMIT)) + else: + # For backwards compatability with versions + # of mod_python prior to 3.3. + self._cleanup_time_limit = int(opts.get('session_cleanup_time_limit',DFT_CLEANUP_TIME_LIMIT)) + + if 'mod_python.file_session.database_directory' in opts: + self._sessdir = os.path.join(opts.get('mod_python.file_session.database_directory', tempdir), 'mp_sess') + elif 'mod_python.session.database_directory' in opts: + self._sessdir = os.path.join(opts.get('mod_python.session.database_directory', tempdir), 'mp_sess') + else: + # For backwards compatability with versions + # of mod_python prior to 3.3. 
+ self._sessdir = os.path.join(opts.get('session_directory', tempdir), 'mp_sess') + + # FIXME + if timeout: + self._cleanup_timeout = timeout + else: + self._cleanup_timeout = DFT_TIMEOUT + + BaseSession.__init__(self, req, sid=sid, secret=secret, + timeout=timeout, lock=lock) + + def do_cleanup(self): + data = {'req':self._req, + 'sessdir':self._sessdir, + 'fast_cleanup':self._fast_cleanup, + 'verify_cleanup':self._verify_cleanup, + 'timeout':self._cleanup_timeout, + 'grace_period':self._grace_period, + 'cleanup_time_limit': self._cleanup_time_limit, + } + + self._req.register_cleanup(filesession_cleanup, data) + self._req.log_error("FileSession: registered filesession cleanup.", + apache.APLOG_NOTICE) + + def do_load(self): + self.lock_file() + try: + try: + path = os.path.join(self._sessdir, self._sid[0:2]) + filename = os.path.join(path, self._sid) + fp = open(filename,'rb') + try: + data = load(fp) + if (time.time() - data["_accessed"]) <= data["_timeout"]: + # Change the file access time to the current time so the + # cleanup does not delete this file before the request + # can save it's session data + os.utime(filename,None) + return data + finally: + fp.close() + except: + s = StringIO() + traceback.print_exc(file=s) + s = s.getvalue() + self._req.log_error('Error while loading a session : %s'%s) + return None + finally: + self.unlock_file() + + def do_save(self, dict): + self.lock_file() + try: + try: + path = os.path.join(self._sessdir, self._sid[0:2]) + if not os.path.exists(path): + make_filesession_dirs(self._sessdir) + filename = os.path.join(path, self._sid) + fp = open(filename, 'wb') + try: + dump(dict, fp, 2) + finally: + fp.close() + except: + s = StringIO() + traceback.print_exc(file=s) + s = s.getvalue() + self._req.log_error('Error while saving a session : %s'%s) + finally: + self.unlock_file() + + def do_delete(self): + self.lock_file() + try: + try: + path = os.path.join(self._sessdir, self._sid[0:2]) + filename = os.path.join(path, 
self._sid) + os.unlink(filename) + except Exception: + pass + finally: + self.unlock_file() + + def lock_file(self): + # self._lock = 1 indicates that session locking is turned on, + # so let BaseSession handle it. + # Otherwise, explicitly acquire a lock for the file manipulation. + if not self._locked: + _apache._global_lock(self._req.server, self._sid) + self._locked = 1 + + def unlock_file(self): + if self._locked and not self._lock: + _apache._global_unlock(self._req.server, self._sid) + self._locked = 0 + +FS_STAT_VERSION = 'MPFS_3.2' +def filesession_cleanup(data): + # There is a small chance that a the cleanup for a given session file + # may occur at the exact time that the session is being accessed by + # another request. It is possible under certain circumstances for that + # session file to be saved in another request only to immediately deleted + # by the cleanup. To avoid this race condition, a session is allowed a + # grace_period before it is considered for deletion by the cleanup. + # As long as the grace_period is longer that the time it takes to complete + # the request (which should normally be less than 1 second), the session will + # not be mistakenly deleted by the cleanup. By doing this we also avoid the + # need to lock individual sessions and bypass any potential deadlock + # situations. + + req = data['req'] + sessdir = data['sessdir'] + fast_cleanup = data['fast_cleanup'] + verify_cleanup = data['verify_cleanup'] + timeout = data['timeout'] + grace_period = data['grace_period'] + cleanup_time_limit = data['cleanup_time_limit'] + + req.log_error('FileSession cleanup: (fast=%s, verify=%s) ...' 
+ % (fast_cleanup,verify_cleanup), + apache.APLOG_NOTICE) + + lockfile = os.path.join(sessdir,'.mp_sess.lck') + try: + lockfp = os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o660) + except: + # check if it's a stale lockfile + mtime = os.stat(lockfile).st_mtime + if mtime < (time.time() - 3600): + # lockfile is over an hour old so it's likely stale. + # Even though there may not be another cleanup process running, + # we are going to defer running the cleanup at this time. + # Short circuiting this cleanup just makes the code a little cleaner. + req.log_error('FileSession cleanup: stale lockfile found - deleting it', + apache.APLOG_NOTICE) + # Remove the stale lockfile so the next call to filesession_cleanup + # can proceed. + os.remove(lockfile) + else: + req.log_error('FileSession cleanup: another process is already running', + apache.APLOG_NOTICE) + return + + try: + status_file = open(os.path.join(sessdir, 'fs_status.txt'), 'r') + d = status_file.readline() + status_file.close() + + if not d.startswith(FS_STAT_VERSION): + raise Exception('wrong status file version') + + parts = d.split() + + stat_version = parts[0] + next_i = int(parts[1]) + expired_file_count = int(parts[2]) + total_file_count = int(parts[3]) + total_time = float(parts[4]) + + except: + stat_version = FS_STAT_VERSION + next_i = 0 + expired_file_count = 0 + total_file_count = 0 + total_time = 0.0 + + try: + start_time = time.time() + filelist = os.listdir(sessdir) + dir_index = list(range(0,256))[next_i:] + for i in dir_index: + path = '%s/%s' % (sessdir,'%02x' % i) + if not os.path.exists(path): + continue + + filelist = os.listdir(path) + total_file_count += len(filelist) + + for f in filelist: + try: + filename = os.path.join(path,f) + if fast_cleanup: + accessed = os.stat(filename).st_mtime + if time.time() - accessed < (timeout + grace_period): + continue + + if fast_cleanup and not verify_cleanup: + delete_session = True + else: + try: + fp = open(filename) + dict = load(fp) + 
if (time.time() - dict['_accessed']) > (dict['_timeout'] + grace_period): + delete_session = True + else: + delete_session = False + finally: + fp.close() + if delete_session: + os.unlink(filename) + expired_file_count += 1 + except: + s = StringIO() + traceback.print_exc(file=s) + s = s.getvalue() + req.log_error('FileSession cleanup error: %s' + % (s), + apache.APLOG_NOTICE) + + next_i = (i + 1) % 256 + time_used = time.time() - start_time + if (cleanup_time_limit > 0) and (time_used > cleanup_time_limit): + break + + total_time += time.time() - start_time + if next_i == 0: + # next_i can only be 0 when the full cleanup has run to completion + req.log_error("FileSession cleanup: deleted %d of %d in %.4f seconds" + % (expired_file_count, total_file_count, total_time), + apache.APLOG_NOTICE) + expired_file_count = 0 + total_file_count = 0 + total_time = 0.0 + else: + req.log_error("FileSession cleanup incomplete: next cleanup will start at index %d (%02x)" + % (next_i, next_i), + apache.APLOG_NOTICE) + + status_file = open(os.path.join(sessdir, 'fs_status.txt'), 'w') + status_file.write('%s %d %d %d %f\n' % (stat_version, next_i, expired_file_count, total_file_count, total_time)) + status_file.close() + + try: + os.unlink(lockfile) + except: + pass + + finally: + os.close(lockfp) + +def make_filesession_dirs(sess_dir): + """Creates the directory structure used for storing session files""" + for i in range(0,256): + path = os.path.join(sess_dir, '%02x' % i) + if not os.path.exists(path): + os.makedirs(path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP) + +########################################################################### +## MemorySession + +def mem_cleanup(sdict): + for sid in list(sdict.keys()): + try: + session = sdict[sid] + if (time.time() - session["_accessed"]) > session["_timeout"]: + del sdict[sid] + except: + pass + +class MemorySession(BaseSession): + + sdict = {} + + def __init__(self, req, sid=0, secret=None, timeout=0, lock=1): + + 
BaseSession.__init__(self, req, sid=sid, secret=secret, + timeout=timeout, lock=lock) + + def do_cleanup(self): + self._req.register_cleanup(mem_cleanup, MemorySession.sdict) + self._req.log_error("MemorySession: registered session cleanup.", + apache.APLOG_NOTICE) + + def do_load(self): + if self._sid in MemorySession.sdict: + return MemorySession.sdict[self._sid] + return None + + def do_save(self, dict): + MemorySession.sdict[self._sid] = dict + + def do_delete(self): + try: + del MemorySession.sdict[self._sid] + except KeyError: pass + +########################################################################### +## Session + +def Session(req, sid=0, secret=None, timeout=0, lock=1): + + opts = req.get_options() + # Check the apache config for the type of session + if 'mod_python.session.session_type' in opts: + sess_type = opts['mod_python.session.session_type'] + elif 'session' in opts: + # For backwards compatability with versions + # of mod_python prior to 3.3. + sess_type = opts['session'] + else: + # no session class in config so get the default for the platform + threaded = _apache.mpm_query(apache.AP_MPMQ_IS_THREADED) + forked = _apache.mpm_query(apache.AP_MPMQ_IS_FORKED) + daemons = _apache.mpm_query(apache.AP_MPMQ_MAX_DAEMONS) + + if (threaded and ((not forked) or (daemons == 1))): + sess_type = 'MemorySession' + else: + sess_type = 'DbmSession' + + if sess_type == 'FileSession': + sess = FileSession + elif sess_type == 'DbmSession': + sess = DbmSession + elif sess_type == 'MemorySession': + sess = MemorySession + else: + # TODO Add capability to load a user defined class + # For now, just raise an exception. 
+ raise Exception('Unknown session type %s' % sess_type) + + return sess(req, sid=sid, secret=secret, + timeout=timeout, lock=lock) + + +## helper functions +def true_or_false(item): + """This function is used to assist in getting appropriate + values set with the PythonOption directive + """ + + try: + item = item.lower() + except: + pass + if item in ['yes','true', '1', 1, True]: + return True + elif item in ['no', 'false', '0', 0, None, False]: + return False + else: + raise Exception diff --git a/lib/python/mod_python/__init__.py b/lib/python/mod_python/__init__.py new file mode 100644 index 0000000..4e8a79e --- /dev/null +++ b/lib/python/mod_python/__init__.py @@ -0,0 +1,34 @@ + # + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); you + # may not use this file except in compliance with the License. You + # may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + # implied. See the License for the specific language governing + # permissions and limitations under the License. + # + # Originally developed by Gregory Trubetskoy. + # + +__all__ = ["apache", "cgihandler", "psp", + "publisher", "util", "python22", "version"] + +# This is used by mod_python.c to make sure the version of C +# code matches the Python code. +from . import version +mp_version = version.version + +try: + # it's possible for mod_python to be imported outside httpd, e.g. to use + # httpdconf, so we fail silently + from . 
import apache +except: pass + + diff --git a/lib/python/mod_python/apache.py b/lib/python/mod_python/apache.py new file mode 100644 index 0000000..7748108 --- /dev/null +++ b/lib/python/mod_python/apache.py @@ -0,0 +1,1215 @@ + # vim: set sw=4 expandtab : + # + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); you + # may not use this file except in compliance with the License. You + # may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + # implied. See the License for the specific language governing + # permissions and limitations under the License. + # + # Originally developed by Gregory Trubetskoy. + # + +import sys +import traceback +import time +import os +import pdb +import stat +import imp +import types +import cgi +import _apache + +try: + import threading +except: + import dummy_threading as threading + +# Cache for values of PythonPath that have been seen already. +_path_cache = {} +_path_cache_lock = threading.Lock() + +_result_warning = """Handler has returned result or raised SERVER_RETURN +exception with argument having non integer type. Type of value returned +was %s, whereas expected """ + str(int) + "." + +class CallBack: + """ + A generic callback object. 
+ """ + + class HStack: + """ + The actual stack string lives in the request object so + it can be manipulated by both apache.py and mod_python.c + """ + + def __init__(self, req): + self.req = req + + def pop(self): + + handlers = self.req.hstack.split() + + if not handlers: + return None + else: + self.req.hstack = " ".join(handlers[1:]) + return handlers[0] + + def ConnectionDispatch(self, conn): + + # config + config, debug = conn.base_server.get_config(), False + if "PythonDebug" in config: + debug = config["PythonDebug"] == "1" + + try: + + handler = conn.hlist.handler + + # split module::handler + l = handler.split('::', 1) + module_name = l[0] + if len(l) == 1: + # no oject, provide default + obj_str = "connectionhandler" + else: + obj_str = l[1] + + # evaluate pythonpath and set sys.path to + # resulting value if not already done + + if "PythonPath" in config: + _path_cache_lock.acquire() + try: + pathstring = config["PythonPath"] + if pathstring not in _path_cache: + newpath = eval(pathstring) + _path_cache[pathstring] = None + sys.path[:] = newpath + finally: + _path_cache_lock.release() + + # import module + autoreload = True + if "PythonAutoReload" in config: + autoreload = config["PythonAutoReload"] == "1" + module = import_module(module_name, + autoreload=autoreload, + log=debug) + # find the object + obj = resolve_object(module, obj_str, + arg=conn, silent=0) + + # Only permit debugging using pdb if Apache has + # actually been started in single process mode. + + pdb_debug = False + if "PythonEnablePdb" in config: + pdb_debug = config["PythonEnablePdb"] == "1" + + if pdb_debug and exists_config_define("ONE_PROCESS"): + + # Don't use pdb.runcall() as it results in + # a bogus 'None' response when pdb session + # is quit. With this code the exception + # marking that the session has been quit is + # propogated back up and it is obvious in + # the error message what actually occurred. 
+ + debugger = pdb.Pdb() + debugger.reset() + sys.settrace(debugger.trace_dispatch) + + try: + result = obj(conn) + + finally: + debugger.quitting = 1 + sys.settrace(None) + + else: + result = obj(conn) + + assert (result.__class__ is int), \ + "ConnectionHandler '%s' returned invalid return code." % handler + + except: + # Error (usually parsing) + try: + exc_type, exc_value, exc_traceback = sys.exc_info() + result = self.ReportError(exc_type, exc_value, exc_traceback, srv=conn.base_server, + phase="ConnectionHandler", + hname=handler, debug=debug) + finally: + exc_traceback = None + + return result + + def FilterDispatch(self, fltr): + req = fltr.req + + # config + config, debug = req.get_config(), False + if "PythonDebug" in config: + debug = config["PythonDebug"] == "1" + + try: + + # split module::handler + l = fltr.handler.split('::', 1) + module_name = l[0] + if len(l) == 1: + # no oject, provide default + if fltr.is_input: + obj_str = "inputfilter" + else: + obj_str = "outputfilter" + else: + obj_str = l[1] + + # add the directory to pythonpath if + # not there yet, or evaluate pythonpath + # and set sys.path to resulting value + # if not already done + + if "PythonPath" in config: + _path_cache_lock.acquire() + try: + pathstring = config["PythonPath"] + if pathstring not in _path_cache: + newpath = eval(pathstring) + _path_cache[pathstring] = None + sys.path[:] = newpath + finally: + _path_cache_lock.release() + else: + if fltr.dir: + _path_cache_lock.acquire() + try: + if fltr.dir not in sys.path: + sys.path[:0] = [fltr.dir] + finally: + _path_cache_lock.release() + + # import module + autoreload = True + if "PythonAutoReload" in config: + autoreload = config["PythonAutoReload"] == "1" + module = import_module(module_name, + autoreload=autoreload, + log=debug) + + # find the object + obj = resolve_object(module, obj_str, + arg=fltr, silent=0) + + # Only permit debugging using pdb if Apache has + # actually been started in single process mode. 
+ + pdb_debug = False + if "PythonEnablePdb" in config: + pdb_debug = config["PythonEnablePdb"] == "1" + + if pdb_debug and exists_config_define("ONE_PROCESS"): + + # Don't use pdb.runcall() as it results in + # a bogus 'None' response when pdb session + # is quit. With this code the exception + # marking that the session has been quit is + # propogated back up and it is obvious in + # the error message what actually occurred. + + debugger = pdb.Pdb() + debugger.reset() + sys.settrace(debugger.trace_dispatch) + + try: + result = obj(fltr) + + finally: + debugger.quitting = 1 + sys.settrace(None) + + else: + result = obj(fltr) + + # always flush the filter. without a FLUSH or EOS bucket, + # the content is never written to the network. + # XXX an alternative is to tell the user to flush() always + if not fltr.closed: + fltr.flush() + + except SERVER_RETURN as value: + # SERVER_RETURN indicates a non-local abort from below + # with value as (result, status) or (result, None) or result + try: + if len(value.args) == 2: + (result, status) = value.args + if status: + req.status = status + else: + result = value.args[0] + + if result.__class__ is not int: + s = "Value raised with SERVER_RETURN is invalid. It is a " + s = s + "%s, but it must be a tuple or an int." % result.__class__ + _apache.log_error(s, APLOG_ERR, req.server) + + return + + except: + pass + + except: + # Error (usually parsing) + try: + exc_type, exc_value, exc_traceback = sys.exc_info() + fltr.disable() + result = self.ReportError(exc_type, exc_value, exc_traceback, req=req, filter=fltr, + phase=fltr.name, hname=fltr.handler, + debug=debug) + finally: + exc_traceback = None + + return OK + + def HandlerDispatch(self, req): + """ + This is the handler dispatcher. 
+ """ + + # be cautious + result = HTTP_INTERNAL_SERVER_ERROR + + # config + config, debug = req.get_config(), False + if "PythonDebug" in config: + debug = config["PythonDebug"] == "1" + + default_obj_str = _phase_handler_names[req.phase] + + # Lookup expected status values that allow us to + # continue when multiple handlers exist. + expected = _status_values[default_obj_str] + + try: + hlist = req.hlist + + while hlist.handler is not None: + + # split module::handler + l = hlist.handler.split('::', 1) + + module_name = l[0] + if len(l) == 1: + # no object, provide default + obj_str = default_obj_str + else: + obj_str = l[1] + + # add the directory to pythonpath if + # not there yet, or evaluate pythonpath + # and set sys.path to resulting value + # if not already done + + if "PythonPath" in config: + _path_cache_lock.acquire() + try: + pathstring = config["PythonPath"] + if pathstring not in _path_cache: + newpath = eval(pathstring) + _path_cache[pathstring] = None + sys.path[:] = newpath + finally: + _path_cache_lock.release() + else: + if not hlist.is_location: + directory = hlist.directory + if directory: + _path_cache_lock.acquire() + try: + if directory not in sys.path: + sys.path[:0] = [directory] + finally: + _path_cache_lock.release() + + # import module + autoreload = True + if "PythonAutoReload" in config: + autoreload = config["PythonAutoReload"] == "1" + module = import_module(module_name, + autoreload=autoreload, + log=debug) + + # find the object + if '.' not in obj_str: # this is an optimization + try: + obj = module.__dict__[obj_str] + except: + if not hlist.silent: + s = "module '%s' contains no '%s'" % (module.__file__, obj_str) + raise AttributeError(s) + else: + obj = resolve_object(module, obj_str, + arg=req, silent=hlist.silent) + + if not hlist.silent or obj is not None: + + try: + # Only permit debugging using pdb if Apache has + # actually been started in single process mode. 
+ + pdb_debug = False + if "PythonEnablePdb" in config: + pdb_debug = config["PythonEnablePdb"] == "1" + + if pdb_debug and exists_config_define("ONE_PROCESS"): + + # Don't use pdb.runcall() as it results in + # a bogus 'None' response when pdb session + # is quit. With this code the exception + # marking that the session has been quit is + # propogated back up and it is obvious in + # the error message what actually occurred. + + debugger = pdb.Pdb() + debugger.reset() + sys.settrace(debugger.trace_dispatch) + + try: + result = obj(req) + + finally: + debugger.quitting = 1 + sys.settrace(None) + + else: + result = obj(req) + + except SERVER_RETURN as value: + + # The SERVER_RETURN exception type when raised + # otherwise indicates an abort from below with + # value as (result, status) or (result, None) or + # result. + + if len(value.args) == 2: + (result, status) = value.args + if status: + req.status = status + else: + result = value.args[0] + + assert (result.__class__ is int), \ + _result_warning % result.__class__ + + # stop cycling through handlers + if result not in expected: + break + + elif hlist.silent: + # A missing handler when in silent mode will + # only propagate DECLINED if it is the first + # and only handler. 
+ + if result == HTTP_INTERNAL_SERVER_ERROR: + result = DECLINED + + hlist.next() + + except: + # Error (usually parsing) + try: + exc_type, exc_value, exc_traceback = sys.exc_info() + result = self.ReportError(exc_type, exc_value, exc_traceback, req=req, + phase=req.phase, hname=hlist.handler, debug=debug) + finally: + exc_traceback = None + + return result + + def IncludeDispatch(self, fltr, tag, code): + + try: + # config + config, debug = fltr.req.get_config(), False + if "PythonDebug" in config: + debug = config["PythonDebug"] == "1" + + if not hasattr(fltr.req,"ssi_globals"): + fltr.req.ssi_globals = {} + + fltr.req.ssi_globals["filter"] = fltr + fltr.req.ssi_globals["__file__"] = fltr.req.filename + + code = code.replace('\r\n', '\n').rstrip() + + if tag == 'eval': + result = eval(code, fltr.req.ssi_globals) + if result is not None: + fltr.write(str(result)) + elif tag == 'exec': + exec(code, fltr.req.ssi_globals) + + fltr.flush() + + except: + try: + exc_type, exc_value, exc_traceback = sys.exc_info() + fltr.disable() + result = self.ReportError(exc_type, exc_value, exc_traceback, + req=fltr.req, filter=fltr, + phase=fltr.name, + hname=fltr.req.filename, + debug=debug) + finally: + exc_traceback = None + + raise + + fltr.req.ssi_globals["filter"] = None + + return OK + + def ImportDispatch(self, name): + + # config + config, debug = main_server.get_config(), False + if "PythonDebug" in config: + debug = config["PythonDebug"] == "1" + + # evaluate pythonpath and set sys.path to + # resulting value if not already done + + if "PythonPath" in config: + _path_cache_lock.acquire() + try: + pathstring = config["PythonPath"] + if pathstring not in _path_cache: + newpath = eval(pathstring) + _path_cache[pathstring] = None + sys.path[:] = newpath + finally: + _path_cache_lock.release() + + # split module::function + l = name.split('::', 1) + module_name = l[0] + func_name = None + if len(l) != 1: + func_name = l[1] + + module = import_module(module_name, log=debug) + 
+ if func_name: + getattr(module, func_name)() + + def ReportError(self, etype, evalue, etb, req=None, filter=None, srv=None, + phase="N/A", hname="N/A", debug=0): + """ + This function is only used when debugging is on. + It sends the output similar to what you'd see + when using Python interactively to the browser + """ + try: # try/finally + try: # try/except + + if str(etype) == "exceptions.IOError" \ + and str(evalue)[:5] == "Write": + # if this is an IOError while writing to client, + # it is probably better not to try to write to the cleint + # even if debug is on. + debug = 0 + + # write to log + for e in traceback.format_exception(etype, evalue, etb): + s = "%s %s: %s" % (phase, hname, e[:-1]) + if req: + req.log_error(s, APLOG_ERR) + else: + _apache.log_error(s, APLOG_ERR, srv) + + if not debug or not req: + return HTTP_INTERNAL_SERVER_ERROR + else: + # write to client + req.status = HTTP_INTERNAL_SERVER_ERROR + req.content_type = 'text/html' + + s = '\n
\nMod_python error: "%s %s"\n\n' % (phase, hname)
+                    for e in traceback.format_exception(etype, evalue, etb):
+                        s = s + cgi.escape(e) + '\n'
+                    s = s + "
\n" + + if filter: + filter.write(s) + filter.flush() + else: + req.write(s) + + return DONE + except: + # last try + traceback.print_exc() + sys.stderr.flush() + + finally: + # erase the traceback + etb = None + # we do not return anything + +def import_module(module_name, autoreload=1, log=0, path=None): + """ + Get the module to handle the request. If + autoreload is on, then the module will be reloaded + if it has changed since the last import. + """ + + # nlehuen: this is a big lock, we'll have to refine it later to get better performance. + # For now, we'll concentrate on thread-safety. + imp.acquire_lock() + try: + # (Re)import + if module_name in sys.modules: + + # The module has been imported already + module = sys.modules[module_name] + oldmtime, mtime = 0, 0 + + if autoreload: + + # but is it in the path? + try: + file = module.__dict__["__file__"] + except KeyError: + file = None + + # the "and not" part of this condition is to prevent execution + # of arbitrary already imported modules, such as os. The + # reason we use startswith as opposed to exact match is that + # modules inside packages are actually in subdirectories. 
                # Same-named module imported from a different directory is
                # treated as a different script and forces a (re)import.
                if not file or (path and not list(filter(file.startswith, path))):
                    # there is a script by this name already imported, but it's in
                    # a different directory, therefore it's a different script
                    mtime, oldmtime = 0, -1 # trigger import
                else:
                    try:
                        last_check = module.__dict__["__mtime_check__"]
                    except KeyError:
                        last_check = 0

                    # Throttle: stat the module file at most once per second;
                    # otherwise mtime == oldmtime == 0 and no reimport happens.
                    if (time.time() - last_check) > 1:
                        oldmtime = module.__dict__.get("__mtime__", 0)
                        mtime = module_mtime(module)
                    else:
                        pass
        else:
            # Not imported yet: force the import below.
            mtime, oldmtime = 0, -1

        if mtime != oldmtime:

            # Import the module
            if log:
                if path:
                    s = "mod_python: (Re)importing module '%s' with path set to '%s'" % (module_name, path)
                else:
                    s = "mod_python: (Re)importing module '%s'" % module_name
                _apache.log_error(s, APLOG_NOTICE)

            # Import each dotted component in turn so packages resolve,
            # attaching every child module to its parent package object.
            parent = None
            parts = module_name.split('.')
            for i in range(len(parts)):
                f, p, d = imp.find_module(parts[i], path)
                try:
                    mname = ".".join(parts[:i+1])
                    module = imp.load_module(mname, f, p, d)
                    if parent:
                        setattr(parent,parts[i],module)
                    parent = module
                finally:
                    # find_module opens the file; always close it
                    if f: f.close()
                if hasattr(module, "__path__"):
                    path = module.__path__

        if mtime == 0:
            mtime = module_mtime(module)

        # Remember the mtime we imported at, for the next autoreload check.
        module.__mtime__ = mtime

        return module
    finally:
        imp.release_lock()

def module_mtime(module):
    """Get modification time of module"""
    mtime = 0
    if "__file__" in module.__dict__:

        filepath = module.__file__

        try:
            # this try/except block is a workaround for a Python bug in
            # 2.0, 2.1 and 2.1.1. See
            # http://sourceforge.net/tracker/?group_id=5470&atid=105470&func=detail&aid=422004

            if os.path.exists(filepath):
                mtime = os.path.getmtime(filepath)

            # If __file__ is a compiled '.pyc', also consider the '.py'
            # source (filepath minus the trailing character) and use the
            # newer of the two timestamps.
            if os.path.exists(filepath[:-1]) :
                mtime = max(mtime, os.path.getmtime(filepath[:-1]))

            # Record when we last statted, so import_module can throttle.
            module.__dict__["__mtime_check__"] = time.time()
        except OSError: pass

    return mtime

def resolve_object(module, obj_str, arg=None, silent=0):
    """
    This function traverses the objects separated by .
+ (period) to find the last one we're looking for: + + From left to right, find objects, if it is + an unbound method of a class, instantiate the + class passing the request as single argument + + 'arg' is sometimes req, sometimes filter, + sometimes connection + """ + + obj = module + + for obj_str in obj_str.split('.'): + + parent = obj + + # don't throw attribute errors when silent + if silent and not hasattr(obj, obj_str): + return None + + # this adds a little clarity if we have an attriute error + if obj == module and not hasattr(module, obj_str): + if hasattr(module, "__file__"): + s = "module '%s' contains no '%s'" % (module.__file__, obj_str) + raise AttributeError(s) + + obj = getattr(obj, obj_str) + + if hasattr(obj, "im_self") and not obj.__self__: + # this is an unbound method, its class + # needs to be instantiated + instance = parent(arg) + obj = getattr(instance, obj_str) + + return obj + +def build_cgi_env(req): + """ + Utility function that returns a dictionary of + CGI environment variables as described in + http://hoohoo.ncsa.uiuc.edu/cgi/env.html + """ + + req.add_cgi_vars() + env = req.subprocess_env.copy() + + if req.path_info and len(req.path_info) > 0: + env["SCRIPT_NAME"] = req.uri[:-len(req.path_info)] + else: + env["SCRIPT_NAME"] = req.uri + + env["GATEWAY_INTERFACE"] = "Python-CGI/1.1" + + # you may want to comment this out for better security + if "authorization" in req.headers_in: + env["HTTP_AUTHORIZATION"] = req.headers_in["authorization"] + + return env + +class NullIO(object): + """ Abstract IO + """ + def tell(self): return 0 + def read(self, n = -1): return "" + def readline(self, length = None): return "" + def readlines(self): return [] + def write(self, s): pass + def writelines(self, list): + self.write("".join(list)) + def isatty(self): return 0 + def flush(self): pass + def close(self): pass + def detach(self): pass + def seek(self, pos, mode = 0): pass + +class CGIStdin(NullIO): + + def __init__(self, req): + self.pos = 0 
class CGIStdin(NullIO):
    """
    File-like object presenting the request body as CGI stdin.
    (Class header and 'self.pos = 0' reconstructed from the previous
    source line so the definition is complete.)
    """

    def __init__(self, req):
        self.pos = 0
        self.req = req
        self.BLOCK = 65536 # 64K
        # note that self.buf sometimes contains leftovers
        # that were read, but not used when readline was used
        self.buf = ""

    def read(self, n = -1):
        """Read n bytes from the request (everything when n == -1)."""
        if n == 0:
            return ""
        if n == -1:
            # slurp the whole body into the buffer, then hand it over
            s = self.req.read(self.BLOCK)
            while s:
                self.buf = self.buf + s
                self.pos = self.pos + len(s)
                s = self.req.read(self.BLOCK)
            result = self.buf
            self.buf = ""
            return result
        else:
            if self.buf:
                s = self.buf[:n]
                # BUGFIX: the consumed prefix must be removed from the
                # buffer; previously it stayed there and was returned
                # again by the next read()/readline().
                self.buf = self.buf[len(s):]
                n = n - len(s)
            else:
                s = ""
            s = s + self.req.read(n)
            self.pos = self.pos + len(s)
            return s

    def readlines(self):
        """Return the remaining input as a list of lines.

        BUGFIX over the original, which did (self.buf + self.read())
        — read() already includes the leftover buffer, so buffered data
        was duplicated — and which appended '\\n' to every element,
        adding a newline to a final partial line and fabricating an
        empty extra line after a trailing newline.
        """
        lines = self.read().split('\n')
        result = [line + '\n' for line in lines[:-1]]
        if lines[-1]:
            # final chunk had no terminating newline; keep it as-is
            result.append(lines[-1])
        return result

    def readline(self, n = -1):
        """Read one line (up to n bytes when n != -1)."""
        if n == 0:
            return ""

        # fill up the buffer
        self.buf = self.buf + self.req.read(self.BLOCK)

        # look for \n in the buffer
        i = self.buf.find('\n')
        while i == -1: # if \n not found - read more
            if (n != -1) and (len(self.buf) >= n): # we're past n
                i = n - 1
                break
            x = len(self.buf)
            self.buf = self.buf + self.req.read(self.BLOCK)
            if len(self.buf) == x: # nothing read, eof
                i = x - 1
                break
            i = self.buf.find('\n', x)

        # carve out the piece, then shorten the buffer
        result = self.buf[:i+1]
        self.buf = self.buf[i+1:]
        self.pos = self.pos + len(result)
        return result


class CGIStdout(NullIO):

    """
    Class that allows writing to the socket directly for CGI.
    """

    def __init__(self, req):
        self.pos = 0
        self.req = req
        self.headers_sent = 0   # becomes 1 once the CGI headers are parsed
        self.headers = ""       # accumulates header text until the blank line

    # def write(self, s): ...  -- the method body continues on the
    # following source line.
+ headers_over = 0 + + # first try RFC-compliant CRLF + ss = self.headers.split('\r\n\r\n', 1) + if len(ss) < 2: + # second try with \n\n + ss = self.headers.split('\n\n', 1) + if len(ss) >= 2: + headers_over = 1 + else: + headers_over = 1 + + if headers_over: + # headers done, process them + ss[0] = ss[0].replace('\r\n', '\n') + lines = ss[0].split('\n') + for line in lines: + h, v = line.split(":", 1) + v = v.strip() + if h.lower() == "status": + status = int(v.split()[0]) + self.req.status = status + elif h.lower() == "content-type": + self.req.content_type = v + self.req.headers_out[h] = v + else: + self.req.headers_out.add(h, v) + + self.headers_sent = 1 + + # write the body if any at this point + self.req.write(ss[1]) + else: + self.req.write(str(s)) + + self.pos = self.pos + len(s) + + def tell(self): return self.pos + +def setup_cgi(req): + """ + Replace sys.stdin and stdout with an objects that read/write to + the socket, as well as substitute the os.environ. + Returns (environ, stdin, stdout) which you must save and then use + with restore_nocgi(). 
+ """ + + # save env + save_env = os.environ.copy() + + si = sys.stdin + so = sys.stdout + + os.environ.update(build_cgi_env(req)) + + sys.stdout = CGIStdout(req) + sys.stdin = CGIStdin(req) + + sys.argv = [] # keeps cgi.py happy + + return save_env, si, so + +def restore_nocgi(sav_env, si, so): + """ see setup_cgi() """ + + osenv = os.environ + + # restore env + for k in list(osenv.keys()): + del osenv[k] + for k in sav_env: + osenv[k] = sav_env[k] + + sys.stdout = si + sys.stdin = so + +interpreter = None +main_server = None +_callback = None + +def register_cleanup(callback, data=None): + _apache.register_cleanup(interpreter, main_server, callback, data) + +def init(name, server): + """ + This function is called by the server at startup time + """ + + global interpreter + global main_server + interpreter = name + main_server = server + + sys.argv = ["mod_python"] + + global _callback + _callback = CallBack() + return _callback + +## Some functions made public +make_table = _apache.table +log_error = _apache.log_error +table = _apache.table +config_tree = _apache.config_tree +server_root = _apache.server_root +mpm_query = _apache.mpm_query +exists_config_define = _apache.exists_config_define +stat = _apache.stat + +## Some constants + +HTTP_CONTINUE = 100 +HTTP_SWITCHING_PROTOCOLS = 101 +HTTP_PROCESSING = 102 +HTTP_OK = 200 +HTTP_CREATED = 201 +HTTP_ACCEPTED = 202 +HTTP_NON_AUTHORITATIVE = 203 +HTTP_NO_CONTENT = 204 +HTTP_RESET_CONTENT = 205 +HTTP_PARTIAL_CONTENT = 206 +HTTP_MULTI_STATUS = 207 +HTTP_MULTIPLE_CHOICES = 300 +HTTP_MOVED_PERMANENTLY = 301 +HTTP_MOVED_TEMPORARILY = 302 +HTTP_SEE_OTHER = 303 +HTTP_NOT_MODIFIED = 304 +HTTP_USE_PROXY = 305 +HTTP_TEMPORARY_REDIRECT = 307 +HTTP_BAD_REQUEST = 400 +HTTP_UNAUTHORIZED = 401 +HTTP_PAYMENT_REQUIRED = 402 +HTTP_FORBIDDEN = 403 +HTTP_NOT_FOUND = 404 +HTTP_METHOD_NOT_ALLOWED = 405 +HTTP_NOT_ACCEPTABLE = 406 +HTTP_PROXY_AUTHENTICATION_REQUIRED= 407 +HTTP_REQUEST_TIME_OUT = 408 +HTTP_CONFLICT = 409 +HTTP_GONE = 410 
+HTTP_LENGTH_REQUIRED = 411 +HTTP_PRECONDITION_FAILED = 412 +HTTP_REQUEST_ENTITY_TOO_LARGE = 413 +HTTP_REQUEST_URI_TOO_LARGE = 414 +HTTP_UNSUPPORTED_MEDIA_TYPE = 415 +HTTP_RANGE_NOT_SATISFIABLE = 416 +HTTP_EXPECTATION_FAILED = 417 +HTTP_UNPROCESSABLE_ENTITY = 422 +HTTP_LOCKED = 423 +HTTP_FAILED_DEPENDENCY = 424 +HTTP_UPGRADE_REQUIRED = 426 +HTTP_INTERNAL_SERVER_ERROR = 500 +HTTP_NOT_IMPLEMENTED = 501 +HTTP_BAD_GATEWAY = 502 +HTTP_SERVICE_UNAVAILABLE = 503 +HTTP_GATEWAY_TIME_OUT = 504 +HTTP_VERSION_NOT_SUPPORTED = 505 +HTTP_VARIANT_ALSO_VARIES = 506 +HTTP_INSUFFICIENT_STORAGE = 507 +HTTP_NOT_EXTENDED = 510 + +# The APLOG constants in Apache are derived from syslog.h +# constants, so we do same here. + +try: + import syslog + APLOG_EMERG = syslog.LOG_EMERG # system is unusable + APLOG_ALERT = syslog.LOG_ALERT # action must be taken immediately + APLOG_CRIT = syslog.LOG_CRIT # critical conditions + APLOG_ERR = syslog.LOG_ERR # error conditions + APLOG_WARNING = syslog.LOG_WARNING # warning conditions + APLOG_NOTICE = syslog.LOG_NOTICE # normal but significant condition + APLOG_INFO = syslog.LOG_INFO # informational + APLOG_DEBUG = syslog.LOG_DEBUG # debug-level messages +except ImportError: + APLOG_EMERG = 0 + APLOG_ALERT = 1 + APLOG_CRIT = 2 + APLOG_ERR = 3 + APLOG_WARNING = 4 + APLOG_NOTICE = 5 + APLOG_INFO = 6 + APLOG_DEBUG = 7 + +APLOG_NOERRNO = 0 # DEPRECATED +OK = REQ_PROCEED = 0 +DONE = -2 +DECLINED = REQ_NOACTION = -1 + +_phase_handler_names = {} +for _phase in ["PythonPostReadRequestHandler", + "PythonTransHandler", + "PythonHeaderParserHandler", + "PythonInitHandler", + "PythonAccessHandler", + "PythonAuthenHandler", + "PythonAuthzHandler", + "PythonTypeHandler", + "PythonFixupHandler", + "PythonHandler", + "PythonLogHandler", + "PythonCleanupHandler"]: + _phase_handler_names[_phase] = _phase[len("python"):].lower() + +_status_values = { + "postreadrequesthandler": [ DECLINED, OK ], + "transhandler": [ DECLINED ], + "headerparserhandler": [ DECLINED, OK ], + 
"inithandler": [ DECLINED, OK ], + "accesshandler": [ DECLINED, OK ], + "authenhandler": [ DECLINED ], + "authzhandler": [ DECLINED ], + "typehandler": [ DECLINED ], + "fixuphandler": [ DECLINED, OK ], + "handler": [ OK ], + "loghandler": [ DECLINED, OK ], + "cleanuphandler": [ OK ], +} + +# constants for get_remote_host +REMOTE_HOST = 0 +REMOTE_NAME = 1 +REMOTE_NOLOOKUP = 2 +REMOTE_DOUBLE_REV = 3 + +# legacy/mod_python things +REQ_ABORTED = HTTP_INTERNAL_SERVER_ERROR +REQ_EXIT = "REQ_EXIT" +SERVER_RETURN = _apache.SERVER_RETURN + +# the req.finfo tuple +FINFO_MODE = 0 +FINFO_INO = 1 +FINFO_DEV = 2 +FINFO_NLINK = 3 +FINFO_UID = 4 +FINFO_GID = 5 +FINFO_SIZE = 6 +FINFO_ATIME = 7 +FINFO_MTIME = 8 +FINFO_CTIME = 9 +FINFO_FNAME = 10 +FINFO_NAME = 11 +FINFO_FILETYPE = 12 + +# the req.parsed_uri +URI_SCHEME = 0 +URI_HOSTINFO = 1 +URI_USER = 2 +URI_PASSWORD = 3 +URI_HOSTNAME = 4 +URI_PORT = 5 +URI_PATH = 6 +URI_QUERY = 7 +URI_FRAGMENT = 8 + +# for req.proxyreq +PROXYREQ_NONE = 0 # No proxy +PROXYREQ_PROXY = 1 # Standard proxy +PROXYREQ_REVERSE = 2 # Reverse proxy +PROXYREQ_RESPONSE = 3 # Origin response + +# methods for req.allow_method() +M_GET = 0 # RFC 2616: HTTP +M_PUT = 1 +M_POST = 2 +M_DELETE = 3 +M_CONNECT = 4 +M_OPTIONS = 5 +M_TRACE = 6 # RFC 2616: HTTP +M_PATCH = 7 +M_PROPFIND = 8 # RFC 2518: WebDAV +M_PROPPATCH = 9 +M_MKCOL = 10 +M_COPY = 11 +M_MOVE = 12 +M_LOCK = 13 +M_UNLOCK = 14 # RFC2518: WebDAV +M_VERSION_CONTROL = 15 # RFC3253: WebDAV Versioning +M_CHECKOUT = 16 +M_UNCHECKOUT = 17 +M_CHECKIN = 18 +M_UPDATE = 19 +M_LABEL = 20 +M_REPORT = 21 +M_MKWORKSPACE = 22 +M_MKACTIVITY = 23 +M_BASELINE_CONTROL = 24 +M_MERGE = 25 +M_INVALID = 26 # RFC3253: WebDAV Versioning + +# for req.used_path_info +AP_REQ_ACCEPT_PATH_INFO = 0 # Accept request given path_info +AP_REQ_REJECT_PATH_INFO = 1 # Send 404 error if path_info was given +AP_REQ_DEFAULT_PATH_INFO = 2 # Module's choice for handling path_info + + +# for mpm_query +AP_MPMQ_NOT_SUPPORTED = 0 # This value specifies 
whether + # an MPM is capable of + # threading or forking. +AP_MPMQ_STATIC = 1 # This value specifies whether + # an MPM is using a static # of + # threads or daemons. +AP_MPMQ_DYNAMIC = 2 # This value specifies whether + # an MPM is using a dynamic # of + # threads or daemons. + +AP_MPMQ_MAX_DAEMON_USED = 1 # Max # of daemons used so far +AP_MPMQ_IS_THREADED = 2 # MPM can do threading +AP_MPMQ_IS_FORKED = 3 # MPM can do forking +AP_MPMQ_HARD_LIMIT_DAEMONS = 4 # The compiled max # daemons +AP_MPMQ_HARD_LIMIT_THREADS = 5 # The compiled max # threads +AP_MPMQ_MAX_THREADS = 6 # # of threads/child by config +AP_MPMQ_MIN_SPARE_DAEMONS = 7 # Min # of spare daemons +AP_MPMQ_MIN_SPARE_THREADS = 8 # Min # of spare threads +AP_MPMQ_MAX_SPARE_DAEMONS = 9 # Max # of spare daemons +AP_MPMQ_MAX_SPARE_THREADS = 10 # Max # of spare threads +AP_MPMQ_MAX_REQUESTS_DAEMON= 11 # Max # of requests per daemon +AP_MPMQ_MAX_DAEMONS = 12 # Max # of daemons by config + +# magic mime types +CGI_MAGIC_TYPE = "application/x-httpd-cgi" +INCLUDES_MAGIC_TYPE = "text/x-server-parsed-html" +INCLUDES_MAGIC_TYPE3 = "text/x-server-parsed-html3" +DIR_MAGIC_TYPE = "httpd/unix-directory" + +# for req.read_body +REQUEST_NO_BODY = 0 +REQUEST_CHUNKED_ERROR = 1 +REQUEST_CHUNKED_DECHUNK = 2 + +# for req.connection.keepalive +AP_CONN_UNKNOWN = _apache.AP_CONN_UNKNOWN +AP_CONN_CLOSE = _apache.AP_CONN_CLOSE +AP_CONN_KEEPALIVE = _apache.AP_CONN_KEEPALIVE + +# for req.finfo[apache.FINFO_FILETYPE] +APR_NOFILE = _apache.APR_NOFILE +APR_REG = _apache.APR_REG +APR_DIR = _apache.APR_DIR +APR_CHR = _apache.APR_CHR +APR_BLK = _apache.APR_BLK +APR_PIPE = _apache.APR_PIPE +APR_LNK = _apache.APR_LNK +APR_SOCK = _apache.APR_SOCK +APR_UNKFILE = _apache.APR_UNKFILE + +# module magic +MODULE_MAGIC_NUMBER_MAJOR = _apache.MODULE_MAGIC_NUMBER_MAJOR +MODULE_MAGIC_NUMBER_MINOR = _apache.MODULE_MAGIC_NUMBER_MINOR + +# for apache.stat() +APR_FINFO_LINK = 0x00000001 # Stat the link not the file itself if it is a link +APR_FINFO_MTIME = 
0x00000010 # Modification Time +APR_FINFO_CTIME = 0x00000020 # Creation or inode-changed time +APR_FINFO_ATIME = 0x00000040 # Access Time +APR_FINFO_SIZE = 0x00000100 # Size of the file +APR_FINFO_CSIZE = 0x00000200 # Storage size consumed by the file +APR_FINFO_DEV = 0x00001000 # Device +APR_FINFO_INODE = 0x00002000 # Inode +APR_FINFO_NLINK = 0x00004000 # Number of links +APR_FINFO_TYPE = 0x00008000 # Type +APR_FINFO_USER = 0x00010000 # User +APR_FINFO_GROUP = 0x00020000 # Group +APR_FINFO_UPROT = 0x00100000 # User protection bits +APR_FINFO_GPROT = 0x00200000 # Group protection bits +APR_FINFO_WPROT = 0x00400000 # World protection bits +APR_FINFO_ICASE = 0x01000000 # if dev is case insensitive +APR_FINFO_NAME = 0x02000000 # ->name in proper case +APR_FINFO_MIN = 0x00008170 # type, mtime, ctime, atime, size +APR_FINFO_IDENT = 0x00003000 # dev and inode +APR_FINFO_OWNER = 0x00030000 # user and group +APR_FINFO_PROT = 0x00700000 # all protections +APR_FINFO_NORM = 0x0073b170 # an atomic unix apr_stat() +APR_FINFO_DIRENT = 0x02000000 # an atomic unix apr_dir_read() diff --git a/lib/python/mod_python/cache.py b/lib/python/mod_python/cache.py new file mode 100644 index 0000000..c6b2bf7 --- /dev/null +++ b/lib/python/mod_python/cache.py @@ -0,0 +1,419 @@ +# + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); you + # may not use this file except in compliance with the License. You + # may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + # implied. See the License for the specific language governing + # permissions and limitations under the License. 
# Originally developed by Gregory Trubetskoy.
#
# This was donated by Nicolas Lehuen, and also posted to the Python Cookbook
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/302997
#

import sys

# Python 2/3 compatibility for the HTTP entity helpers further below.
if sys.version[0] == '2':
    from urllib2 import Request
    from urllib2 import HTTPError
else:
    from urllib.request import Request
    from urllib.error import HTTPError

from os import stat
from time import time, mktime
from email.utils import parsedate
from calendar import timegm
import re
import weakref
import types

try:
    from threading import Lock
except ImportError:
    from dummy_threading import Lock

# Sentinel marking an Entry whose value has not been built yet.
NOT_INITIALIZED = object()


class Entry(object):
    """ A cache entry, mostly an internal object. """

    def __init__(self, key):
        object.__init__(self)
        self._key = key                   # key this entry is stored under
        self._value = NOT_INITIALIZED     # payload, built lazily
        self._lock = Lock()               # per-entry build/update lock


class Cache(object):
    """ An abstract, multi-threaded cache object. """

    def __init__(self, max_size=0):
        """ Builds a cache with a limit of max_size entries.
        If this limit is exceeded, the Least Recently Used entry is discarded.
        if max_size==0, the cache is unbounded (no LRU rule is applied).
        """
        object.__init__(self)
        self._maxsize = max_size
        self._dict = {}
        self._lock = Lock()

        # Head of the circular doubly-linked access (LRU) list; only
        # maintained when the cache is bounded.
        if self._maxsize:
            self._head = Entry(None)
            self._head._previous = self._head
            self._head._next = self._head

    def __setitem__(self, name, value):
        """ Populates the cache with a given name and value. """
        entry = self._get_entry(self.key(name))
        with entry._lock:
            self._pack(entry, value)
            self.commit()

    def __getitem__(self, name):
        """ Gets a value from the cache, builds it if required. """
        return self._checkitem(name)[2]
+ """ + return self._checkitem(name)[2] + + def __delitem__(self, name): + self._lock.acquire() + try: + key = self.key(name) + del self._dict[key] + finally: + self._lock.release() + + def _get_entry(self,key): + self._lock.acquire() + try: + entry = self._dict.get(key) + if not entry: + entry = Entry(key) + self._dict[key]=entry + if self._maxsize: + entry._next = entry._previous = None + self._access(entry) + self._checklru() + elif self._maxsize: + self._access(entry) + return entry + finally: + self._lock.release() + + def _checkitem(self, name): + """ Gets a value from the cache, builds it if required. + Returns a tuple is_new, key, value, entry. + If is_new is True, the result had to be rebuilt. + """ + key = self.key(name) + + entry = self._get_entry(key) + + entry._lock.acquire() + try: + value = self._unpack(entry) + is_new = False + if value is NOT_INITIALIZED: + opened = self.check(key, name, entry) + value = self.build(key, name, opened, entry) + is_new = True + self._pack(entry, value) + self.commit() + else: + opened = self.check(key, name, entry) + if opened is not None: + value = self.build(key, name, opened, entry) + is_new = True + self._pack(entry, value) + self.commit() + return is_new, key, value, entry + finally: + entry._lock.release() + + def mru(self): + """ Returns the Most Recently Used key """ + if self._maxsize: + self._lock.acquire() + try: + return self._head._previous._key + finally: + self._lock.release() + else: + return None + + def lru(self): + """ Returns the Least Recently Used key """ + if self._maxsize: + self._lock.acquire() + try: + return self._head._next._key + finally: + self._lock.release() + else: + return None + + def key(self, name): + """ Override this method to extract a key from the name passed to the [] operator """ + return name + + def commit(self): + """ Override this method if you want to do something each time the underlying dictionary is modified (e.g. make it persistent). 
""" + pass + + def clear(self): + """ Clears the cache """ + self._lock.acquire() + try: + self._dict.clear() + if self._maxsize: + self._head._next=self._head + self._head._previous=self._head + finally: + self._lock.release() + + def check(self, key, name, entry): + """ Override this method to check whether the entry with the given name is stale. Return None if it is fresh + or an opened resource if it is stale. The object returned will be passed to the 'build' method as the 'opened' parameter. + Use the 'entry' parameter to store meta-data if required. Don't worry about multiple threads accessing the same name, + as this method is properly isolated. + """ + return None + + def build(self, key, name, opened, entry): + """ Build the cached value with the given name from the given opened resource. Use entry to obtain or store meta-data if needed. + Don't worry about multiple threads accessing the same name, as this method is properly isolated. + """ + raise NotImplementedError() + + def _access(self, entry): + " Internal use only, must be invoked within a cache lock. Updates the access list. """ + if entry._next is not self._head: + if entry._previous is not None: + # remove the entry from the access list + entry._previous._next=entry._next + entry._next._previous=entry._previous + # insert the entry at the end of the access list + entry._previous=self._head._previous + entry._previous._next=entry + entry._next=self._head + entry._next._previous=entry + if self._head._next is self._head: + self._head._next=entry + + def _checklru(self): + " Internal use only, must be invoked within a cache lock. Removes the LRU entry if needed. """ + if len(self._dict)>self._maxsize: + lru=self._head._next + lru._previous._next=lru._next + lru._next._previous=lru._previous + del self._dict[lru._key] + + def _pack(self, entry, value): + """ Store the value in the entry. 
""" + entry._value=value + + def _unpack(self, entry): + """ Recover the value from the entry, returns NOT_INITIALIZED if it is not OK. """ + return entry._value + +class WeakCache(Cache): + """ This cache holds weak references to the values it stores. Whenever a value is not longer + normally referenced, it is removed from the cache. Useful for sharing the result of long + computations but letting them go as soon as they are not needed by anybody. + """ + + def _pack(self, entry, value): + entry._value=weakref.ref(value, lambda ref: self.__delitem__(entry._key)) + + def _unpack(self, entry): + if entry._value is NOT_INITIALIZED: + return NOT_INITIALIZED + + value = entry._value() + if value is None: + return NOT_INITIALIZED + else: + return value + +class FileCache(Cache): + """ A file cache. Returns the content of the files as a string, given their filename. + Whenever the files are modified (according to their modification time) the cache is updated. + Override the build method to obtain more interesting behaviour. + """ + def __init__(self, max_size=0, mode='rb'): + Cache.__init__(self, max_size) + self.mode=mode + + def check(self, key, name, entry): + timestamp = stat(key).st_mtime + + if entry._value is NOT_INITIALIZED: + entry._timestamp = timestamp + return open(key, self.mode) + else: + if entry._timestamp != timestamp: + entry._timestamp = timestamp + return open(key, self.mode) + else: + return None + + def build(self, key, name, opened, entry): + """ Return the content of the file as a string. Override this for better behaviour. 
""" + try: + return opened.read() + finally: + opened.close() + +def parseRFC822Time(t): + return mktime(parsedate(t)) + +re_max_age=re.compile('max-age\s*=\s*(\d+)', re.I) + +class HTTPEntity(object): + def __init__(self, entity, metadata): + self.entity=entity + self.metadata=metadata + + def __repr__(self): + return 'HTTPEntity(%s, %s)'%(repr(self.entity), self.metadata) + + def __str__(self): + return self.entity + +class HTTPCache(Cache): + """ An HTTP cache. Returns the entity found at the given URL. + Uses Expires, ETag and Last-Modified headers to minimize bandwidth usage. + Partial Cache-Control support (only max-age is supported). + """ + def check(self, key, name, entry): + request = urllib.request.Request(key) + + try: + if time()\n" % (self.tag, self.attr) + if self.flipslash: + s = s.replace("\\", "/") + for arg in self.args: + arg.indent = self.indent + 2 + s += "%s" % str(arg) + s += i + "\n" % self.tag + return s + +class Comment: + + def __init__(self, comment): + self.comment = comment + self.indent = 0 + + def __repr__(self): + i = " " * self.indent + lines = self.comment.splitlines() + s = i + "Comment(%s" % repr(lines[0]+"\n") + for line in lines[1:]: + s += "\n " + i + repr(line+"\n") + s += ")" + return s + + def __str__(self): + i = " " * self.indent + s = "" + for line in self.comment.splitlines(): + s += i + '# %s\n' % line + return s + +## directives + +class AddHandler(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class AddOutputFilter(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class AddType(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class AuthBasicAuthoritative(Directive): + # New in Apache 2.2 + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class AuthBasicProvider(Directive): + # New in Apache 2.2 + def __init__(self, val): + 
Directive.__init__(self, self.__class__.__name__, val) + +class AuthType(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class AuthName(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class CustomLog(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class Directory(ContainerTag): + def __init__(self, dir, *args): + ContainerTag.__init__(self, self.__class__.__name__, dir, args) + +class DirectoryIndex(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class DocumentRoot(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class ErrorLog(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class Files(ContainerTag): + def __init__(self, dir, *args): + ContainerTag.__init__(self, self.__class__.__name__, dir, args) + +class IfModule(ContainerTag): + def __init__(self, dir, *args): + ContainerTag.__init__(self, self.__class__.__name__, dir, args) + +class KeepAliveTimeout(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class Listen(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class LoadModule(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class Location(ContainerTag): + def __init__(self, dir, *args): + ContainerTag.__init__(self, self.__class__.__name__, dir, args) + +class LogLevel(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class LogFormat(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val, flipslash=0) + +class LockFile(Directive): + def __init__(self, val): + import sys + if sys.platform!='win32': + Directive.__init__(self, 
self.__class__.__name__, val) + else: + Directive.__init__(self, '#'+self.__class__.__name__, val) + +class MaxConnectionsPerChild(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class MaxClients(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class MaxRequestsPerChild(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class MaxSpareServers(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class MaxSpareThreads(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class MaxThreadsPerChild(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class MinSpareThreads(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class Mutex(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class NameVirtualHost(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class NumServers(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class Options(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class PidFile(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class PythonAuthenHandler(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class PythonAuthzHandler(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class PythonCleanupHandler(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class PythonConnectionHandler(Directive): + def __init__(self, val): + 
Directive.__init__(self, self.__class__.__name__, val) + +class PythonDebug(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class PythonHandler(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class PythonAccessHandler(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class PythonPostReadRequestHandler(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class PythonTransHandler(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class PythonFixupHandler(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class PythonImport(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class PythonPath(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val, flipslash=0) + +class PythonOutputFilter(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class PythonOption(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class Require(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class SetHandler(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class ServerAdmin(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class ServerName(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class ServerPath(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class ServerRoot(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class 
StartServers(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class StartThreads(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class ThreadsPerChild(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class Timeout(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class TypesConfig(Directive): + def __init__(self, val): + Directive.__init__(self, self.__class__.__name__, val) + +class PythonInterpPerDirectory(Directive): + def __init__(self, val='Off'): + Directive.__init__(self, self.__class__.__name__, val) + +class PythonInterpPerDirective(Directive): + def __init__(self, val='Off'): + Directive.__init__(self, self.__class__.__name__, val) + +class VirtualHost(ContainerTag): + def __init__(self, addr, *args): + ContainerTag.__init__(self, self.__class__.__name__, addr, args) + +## utility functions + +def quote_if_space(s): + + # Windows doesn't like quotes when there are + # no spaces, but needs them otherwise, + # TODO: Is this still true? + if s.find(" ") != -1: + s = '"%s"' % s + return s + +def write_basic_config(server_root, listen='0.0.0.0:8888', conf="conf", logs="logs", + htdocs="public", pythonhandler="mod_python.publisher", + pythonpath=[], pythonoptions=[], mp_comments=[], + conf_name='httpd_conf.py', createdirs=True, replace_config=False): + """This generates a sensible Apache configuration""" + + conf_path = os.path.join(server_root, conf, conf_name) + if os.path.exists(conf_path) and not replace_config: + print('Error: %s already exists, aborting.' % repr(conf_path), file=sys.stderr) + return + + if createdirs: + for dirname in [server_root, + os.path.join(server_root, htdocs), + os.path.join(server_root, conf), + os.path.join(server_root, logs)]: + if os.path.isdir(dirname): + print("Warning: directory %s already exists, continuing." 
% repr(dirname), file=sys.stderr) + else: + print("Creating directory %s." % repr(dirname), file=sys.stderr) + os.mkdir(dirname) + + # try to find mime.types + mime_types_dest = os.path.join(server_root, conf, 'mime.types') + if os.path.isfile(mime_types_dest): + print("Warning: file %s already exists, continuing." % repr(mime_types_dest), file=sys.stderr) + else: + for mime_types_dir in [mod_python.version.SYSCONFDIR, '/etc']: + mime_types_src = os.path.join(mime_types_dir, 'mime.types') + if os.path.isfile(mime_types_src): + print("Copying %s to %s" % (repr(mime_types_src), repr(mime_types_dest)), file=sys.stderr) + shutil.copy(mime_types_src, mime_types_dest) + break + + mime_types = os.path.join(conf, "mime.types") + if not os.path.exists(os.path.join(server_root, mime_types)): + print("Warning: file %s does not exist." % repr(os.path.join(server_root, mime_types)), file=sys.stderr) + + if not os.path.isdir(os.path.join(server_root, htdocs)): + print("Warning: %s does not exist or not a directory." 
% repr(os.path.join(server_root, htdocs)), file=sys.stderr) + + modpath = mod_python.version.LIBEXECDIR + + modules = Container(Comment("\nLoad the necessary modules (this is the default httpd set):\n\n")) + for module in [ + ['authn_file_module', 'mod_authn_file.so'], + ['authn_core_module', 'mod_authn_core.so'], + ['authz_host_module', 'mod_authz_host.so'], + ['authz_groupfile_module', 'mod_authz_groupfile.so'], + ['authz_user_module', 'mod_authz_user.so'], + ['authz_core_module', 'mod_authz_core.so'], + ['access_compat_module', 'mod_access_compat.so'], + ['auth_basic_module', 'mod_auth_basic.so'], + ['reqtimeout_module', 'mod_reqtimeout.so'], + ['include_module', 'mod_include.so'], + ['filter_module', 'mod_filter.so'], + ['mime_module', 'mod_mime.so'], + ['log_config_module', 'mod_log_config.so'], + ['env_module', 'mod_env.so'], + ['headers_module', 'mod_headers.so'], + ['setenvif_module', 'mod_setenvif.so'], + ['version_module', 'mod_version.so'], + ['unixd_module', 'mod_unixd.so'], + ['status_module', 'mod_status.so'], + ['autoindex_module', 'mod_autoindex.so'], + ['dir_module', 'mod_dir.so'], + ['alias_module', 'mod_alias.so'], + ]: + modules.append( + LoadModule("%s %s" % (module[0], quote_if_space(os.path.join(modpath, module[1])))) + ) + + main = Container(Comment("\nMain configuration options:\n\n"), + + ServerRoot(server_root), + + Container(MaxConnectionsPerChild('65536'), + only_if="mod_python.version.HTTPD_VERSION[0:3] == '2.4'"), + Container(MaxRequestsPerChild('65536'), + only_if="mod_python.version.HTTPD_VERSION[0:3] < '2.4'"), + + LogFormat(r'"%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined'), + CustomLog("%s combined" % quote_if_space(os.path.join(logs, "access_log"))), + ErrorLog(quote_if_space(os.path.join(logs, "error_log"))), + LogLevel("warn"), + + PidFile(quote_if_space(os.path.join(logs, "httpd.pid"))), + + TypesConfig(quote_if_space(mime_types)), + + # The only reason we need a ServerName is so that Apache does not 
+ # generate a warning about being unable to determine its name. + ServerName("127.0.0.1"), + Listen(listen), + DocumentRoot(quote_if_space(os.path.join(server_root, htdocs))), + + Container(LockFile(quote_if_space(os.path.join(logs, "accept.lock"))), + only_if="mod_python.version.HTTPD_VERSION[0:3] == '2.2'"), + ) + + mp = Container(Comment("\nmod_python-specific options:\n\n"), + LoadModule("python_module %s" % quote_if_space(quote_if_space(os.path.join(modpath, 'mod_python.so')))), + SetHandler("mod_python"), + Comment("PythonDebug On"), + PythonHandler(pythonhandler), + ) + + if pythonpath: + pp = "sys.path+[" + for p in pythonpath: + pp += repr(p)+"," + pp += "]" + mp.append(PythonPath('"%s"' % pp)) + for po in pythonoptions: + mp.append(PythonOption(po)) + for c in mp_comments: + mp.append(Comment(c)) + + config = Container() + config.append(Comment( + "\n" + "This config was auto-generated, do not edit!\n" + "\n" + )) + config.append(modules) + config.append(main) + config.append(mp) + + s = """#!%s + +# +# This config was auto-generated, but you can edit it! +# It can be used to generate an Apache config by simply +# running it. We recommend you run it like this: +# $ mod_python genconfig > +#\n +""" % mod_python.version.PYTHON_BIN + s += "from mod_python.httpdconf import *\n\n" + s += "config = " + repr(config) + s += "\n\nprint(config)\n" + + print("Writing %s." % repr(conf_path), file=sys.stderr) + open(conf_path, 'w').write(s) + return conf_path diff --git a/lib/python/mod_python/psp.py b/lib/python/mod_python/psp.py new file mode 100644 index 0000000..f994847 --- /dev/null +++ b/lib/python/mod_python/psp.py @@ -0,0 +1,470 @@ + # vim: set sw=4 expandtab : + # + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); you + # may not use this file except in compliance with the License. 
You + # may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + # implied. See the License for the specific language governing + # permissions and limitations under the License. + # + # This file originally written by Sterling Hughes + # + +from . import apache, Session, util, _psp +import _apache + +import sys +import os +import marshal +import types +from cgi import escape +import dbm, dbm +import tempfile + +# dbm types for cache +dbm_types = {} + +tempdir = tempfile.gettempdir() + +def path_split(filename): + + dir, fname = os.path.split(filename) + if sys.platform.startswith("win"): + dir += "\\" + else: + dir += "/" + + return dir, fname + +def code2str(c): + + ctuple = (c.co_argcount, c.co_nlocals, c.co_stacksize, c.co_flags, + c.co_code, c.co_consts, c.co_names, c.co_varnames, c.co_filename, + c.co_name, c.co_firstlineno, c.co_lnotab) + + return marshal.dumps(ctuple) + +def str2code(s): + + return types.CodeType(*marshal.loads(s)) + +class PSPInterface: + + def __init__(self, req, filename, form): + self.req = req + self.filename = filename + self.error_page = None + self.form = form + + def set_error_page(self, page): + if page and page[0] == '/': + # relative to document root + self.error_page = PSP(self.req, self.req.document_root() + page) + else: + # relative to same dir we're in + dir = path_split(self.filename)[0] + self.error_page = PSP(self.req, dir + page) + + def apply_data(self, object): + + if not self.form: + if not hasattr(self.req, 'form'): + # no existing form, so need to create one, + # form has to be saved back to request object + # so that error page can access it if need be + self.form = util.FieldStorage(self.req, keep_blank_values=1) + self.req.form = self.form + else: + self.form = 
self.req.form + + return util.apply_fs_data(object, self.form, req=self.req) + + def redirect(self, location, permanent=0): + + util.redirect(self.req, location, permanent) + +class PSP: + + code = None + dbmcache = None + + def __init__(self, req, filename=None, string=None, vars={}): + + if (string and filename): + raise ValueError("Must specify either filename or string") + + self.req, self.vars = req, vars + + if not filename and not string: + filename = req.filename + + self.filename, self.string = filename, string + + if filename: + + # if filename is not absolute, default to our guess + # of current directory + if not os.path.isabs(filename): + base = os.path.split(req.filename)[0] + self.filename = os.path.join(base, filename) + + self.load_from_file() + else: + + cached = mem_scache.get(string) + if cached: + self.code = cached + else: + source = _psp.parsestring(string) + code = compile(source, "__psp__", "exec") + mem_scache.store(string,code) + self.code = code + + def cache_get(self, filename, mtime): + + opts = self.req.get_options() + if "mod_python.psp.cache_database_filename" in opts: + self.dbmcache = opts["mod_python.psp.cache_database_filename"] + elif "PSPDbmCache" in opts: + # For backwards compatability with versions + # of mod_python prior to 3.3. 
+ self.dbmcache = opts["PSPDbmCache"] + + if self.dbmcache: + cached = dbm_cache_get(self.req.server, self.dbmcache, + filename, mtime) + if cached: + return cached + + cached = mem_fcache.get(filename, mtime) + if cached: + return cached + + def cache_store(self, filename, mtime, code): + + if self.dbmcache: + dbm_cache_store(self.req.server, self.dbmcache, + filename, mtime, code) + else: + mem_fcache.store(filename, mtime, code) + + def cfile_get(self, filename, mtime): + + # check for a file ending with 'c' (precompiled file) + name, ext = os.path.splitext(filename) + cname = name + ext[:-1] + 'c' + + if os.path.isfile(cname): + cmtime = os.path.getmtime(cname) + + if cmtime >= mtime: + return str2code(open(cname).read()) + + def load_from_file(self): + + filename = self.filename + + if not os.path.isfile(filename): + raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND) + + mtime = os.path.getmtime(filename) + + # check cache + code = self.cache_get(filename, mtime) + + # check for precompiled file + if not code: + code = self.cfile_get(filename, mtime) + + # finally parse and compile + if not code: + dir, fname = path_split(self.filename) + source = _psp.parse(fname, dir) + code = compile(source, filename, "exec") + + # store in cache + self.cache_store(filename, mtime, code) + + self.code = code + + def run(self, vars={}, flush=0): + + code, req = self.code, self.req + + # does this code use session? + session = None + if "session" in code.co_names: + if not hasattr(req, 'session'): + # no existing session, so need to create one, + # session has to be saved back to request object + # to avoid deadlock if error page tries to use it + req.session = session = Session.Session(req) + + # does this code use form? 
+ form = None + if "form" in code.co_names: + if not hasattr(req, 'form'): + # no existing form, so need to create one, + # form has to be saved back to request object + # so that error page can access it if need be + form = util.FieldStorage(req, keep_blank_values=1) + req.form = form + else: + form = req.form + + # create psp interface object + psp = PSPInterface(req, self.filename, form) + + try: + global_scope = globals().copy() + global_scope.update({"req":req, "form":form, "psp":psp}) + + # strictly speaking, session attribute only needs + # to be populated if referenced, but historically + # code has always populated it even if None, so + # preserve that just in case changing it breaks + # some users code + if hasattr(req, 'session'): + global_scope.update({"session":req.session}) + else: + global_scope.update({"session":None}) + + global_scope.update(self.vars) # passed in __init__() + global_scope.update(vars) # passed in run() + + class _InstanceInfo: + + def __init__(self, label, file, cache): + self.label = label + self.file = file + self.cache = cache + self.children = {} + + global_scope["__file__"] = req.filename + global_scope["__mp_info__"] = _InstanceInfo( + None, req.filename, None) + global_scope["__mp_path__"] = [] + + try: + exec(code, global_scope) + if flush: + req.flush() + + # the mere instantiation of a session changes it + # (access time), so it *always* has to be saved + if hasattr(req, 'session'): + req.session.save() + except: + et, ev, etb = sys.exc_info() + if psp.error_page: + # run error page + psp.error_page.run({"exception": (et, ev, etb)}, flush) + else: + raise et(ev).with_traceback(etb) + finally: + # if session was created here, unlock it and don't leave + # it behind in request object in unlocked state as it + # will just cause problems if then used by subsequent code + if session is not None: + session.unlock() + del req.session + + def __str__(self): + self.req.content_type = 'text/html' + self.run() + return "" + + def 
display_code(self): + """ + Display a niceliy HTML-formatted side-by-side of + what PSP generated next to orinial code. + """ + + req, filename = self.req, self.filename + + # Because of caching, source code is most often not + # available in this object, so we read it here + # (instead of trying to get it in __init__ somewhere) + + dir, fname = path_split(filename) + + source = open(filename).read().splitlines() + pycode = _psp.parse(fname, dir).splitlines() + + source = [s.rstrip() for s in source] + pycode = [s.rstrip() for s in pycode] + + req.write("\n") + for s in ("", " PSP-produced Python Code:", + " %s:" % filename): + req.write("" % s) + req.write("\n") + + n = 1 + for line in pycode: + req.write("") + left = escape(line).replace("\t", " "*4).replace(" ", " ") + if len(source) < n: + right = "" + else: + right = escape(source[n-1]).replace("\t", " "*4).replace(" ", " ") + for s in ("%d. " % n, + "%s" % left, + " %s" % right): + req.write("" % s) + req.write("\n") + + n += 1 + req.write("
%s
%s
\n") + + +def parse(filename, dir=None): + if dir: + return _psp.parse(filename, dir) + else: + return _psp.parse(filename) + +def parsestring(str): + + return _psp.parsestring(str) + +def handler(req): + + req.content_type = "text/html" + + config = req.get_config() + debug = debug = int(config.get("PythonDebug", 0)) + + if debug and req.filename[-1] == "_": + p = PSP(req, req.filename[:-1]) + p.display_code() + else: + p = PSP(req) + p.run() + + return apache.OK + +def dbm_cache_type(dbmfile): + + global dbm_types + + if dbmfile in dbm_types: + return dbm_types[dbmfile] + + module = dbm.whichdb(dbmfile) + if module: + dbm_type = __import__(module) + dbm_types[dbmfile] = dbm_type + return dbm_type + else: + # this is a new file + return anydbm + +def dbm_cache_store(srv, dbmfile, filename, mtime, val): + + dbm_type = dbm_cache_type(dbmfile) + + # NOTE: acquiring a lock for the dbm file (also applies to dbm_cache_get) + # See http://issues.apache.org/jira/browse/MODPYTHON-69 + # In mod_python versions < 3.2 "pspcache" was used as the lock key. + # ie. _apache._global_lock(srv, key, index) + # Assuming there are 32 mutexes (the default in 3.1.x), "pspcache" + # will hash to one of 31 mutexes (index 0 is reserved). Therefore + # there is a 1 in 31 chance for a hash collision if a session is + # used in the same request, which would result in a deadlock. This + # has been confirmed by testing. + # We can avoid this by using index 0 and setting the key to None. + # Lock index 0 is also used by DbmSession for locking it's dbm file, + # but since the lock is not held for the duration of the request there + # should not be any additional deadlock issues. Likewise, the lock + # here is only held for a short time, so it will not interfere + # with DbmSession file locking. 
+ + _apache._global_lock(srv, None, 0) + try: + dbm = dbm_type.open(dbmfile, 'c') + dbm[filename] = "%d %s" % (mtime, code2str(val)) + finally: + try: dbm.close() + except: pass + _apache._global_unlock(srv, None, 0) + +def dbm_cache_get(srv, dbmfile, filename, mtime): + + dbm_type = dbm_cache_type(dbmfile) + _apache._global_lock(srv, None, 0) + try: + dbm = dbm_type.open(dbmfile, 'c') + try: + entry = dbm[filename] + t, val = entry.split(" ", 1) + if int(t) == mtime: + return str2code(val) + except KeyError: + return None + finally: + try: dbm.close() + except: pass + _apache._global_unlock(srv, None, 0) + + +class HitsCache: + + def __init__(self, size=512): + self.cache = {} + self.size = size + + def store(self, key, val): + self.cache[key] = (1, val) + if len(self.cache) > self.size: + self.clean() + + def get(self, key): + if key in self.cache: + hits, val = self.cache[key] + self.cache[key] = (hits+1, val) + return val + else: + return None + + def clean(self): + + byhits = [(n[1], n[0]) for n in list(self.cache.items())] + byhits.sort() + + # delete enough least hit entries to make cache 75% full + for item in byhits[:len(self.cache)-int(self.size*.75)]: + val, key = item + del self.cache[key] + +mem_scache = HitsCache() + +class FileCache(HitsCache): + + def store(self, filename, mtime, code): + self.cache[filename] = (1, mtime, code) + if len(self.cache) > self.size: + self.clean() + + def get(self, filename, mtime): + try: + hits, c_mtime, code = self.cache[filename] + if mtime != c_mtime: + del self.cache[filename] + return None + else: + self.cache[filename] = (hits+1, mtime, code) + return code + except KeyError: + return None + +mem_fcache = FileCache() + diff --git a/lib/python/mod_python/publisher.py b/lib/python/mod_python/publisher.py new file mode 100644 index 0000000..75d103f --- /dev/null +++ b/lib/python/mod_python/publisher.py @@ -0,0 +1,511 @@ + # + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 
2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); you + # may not use this file except in compliance with the License. You + # may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + # implied. See the License for the specific language governing + # permissions and limitations under the License. + # + # Originally developed by Gregory Trubetskoy. + # + +""" + This handler is conceptually similar to Zope's ZPublisher, except + that it: + + 1. Is written specifically for mod_python and is therefore much faster + 2. Does not require objects to have a documentation string + 3. Passes all arguments as simply string + 4. Does not try to match Python errors to HTTP errors + 5. Does not give special meaning to '.' and '..'. +""" + +from . import apache +from . import util + +import sys +import os +from os.path import exists, isabs, normpath, split, isfile, join, dirname +import imp +import re +import base64 + +import types +from types import * +import collections + + +imp_suffixes = " ".join([x[0][1:] for x in imp.get_suffixes()]) + +# Python 2/3 compat workaround +PY2 = sys.version[0] == '2' +def _callable(obj): + if PY2: + return callable(obj) + else: + return (isinstance(obj, collections.Callable) or + (hasattr(obj, "__call__") and isinstance(obj.__call__, collections.Callable))) + +####################### The published page cache ############################## + +from .cache import ModuleCache, NOT_INITIALIZED + +class PageCache(ModuleCache): + """ This is the cache for page objects. Handles the automatic reloading of pages. 
""" + + def key(self, req): + """ Extracts the normalized filename from the request """ + return req.filename + + def check(self, key, req, entry): + config = req.get_config() + autoreload=int(config.get("PythonAutoReload", 1)) + if autoreload==0 and entry._value is not NOT_INITIALIZED: + # if we don't want to reload and we have a value, + # then we consider it fresh + return None + else: + return ModuleCache.check(self, key, req, entry) + + def build(self, key, req, opened, entry): + config = req.get_config() + log=int(config.get("PythonDebug", 0)) + if log: + if entry._value is NOT_INITIALIZED: + req.log_error('Publisher loading page %s'%req.filename, apache.APLOG_NOTICE) + else: + req.log_error('Publisher reloading page %s'%req.filename, apache.APLOG_NOTICE) + return ModuleCache.build(self, key, req, opened, entry) + +page_cache = PageCache() + +####################### Interface to the published page cache ################## + +# def get_page(req, path): +# """ +# This imports a published page. If the path is absolute it is used as is. +# If it is a relative path it is relative to the published page +# where the request is really handled (not relative to the path +# given in the URL). +# +# Warning : in order to maintain consistency in case of module reloading, +# do not store the resulting module in a place that outlives the request +# duration. +# """ +# +# real_filename = req.filename +# +# try: +# if isabs(path): +# req.filename = path +# else: +# req.filename = normpath(join(dirname(req.filename), path)) +# +# return page_cache[req] +# +# finally: +# req.filename = real_filename + +####################### The publisher handler himself ########################## + +def handler(req): + + req.allow_methods(["GET", "POST", "HEAD"]) + if req.method not in ["GET", "POST", "HEAD"]: + raise apache.SERVER_RETURN(apache.HTTP_METHOD_NOT_ALLOWED) + + # Derive the name of the actual module which will be + # loaded. 
In older version of mod_python.publisher + # you can't actually have a code file name which has + # an embedded '.' in it except for that used by the + # extension. This is because the standard Python + # module import system which is used will think that + # you are importing a submodule of a package. In + # this code, because the standard Python module + # import system isn't used and the actual file is + # opened directly by name, an embedded '.' besides + # that used for the extension will technically work. + + path,module_name = os.path.split(req.filename) + + # If the request is against a directory, fallback to + # looking for the 'index' module. This is determined + # by virtue of the fact that Apache will always add + # a trailing slash to 'req.filename' when it matches + # a directory. This will mean that the calculated + # module name will be empty. + + if not module_name: + module_name = 'index' + + # Now need to strip off any special extension which + # was used to trigger this handler in the first place. + + suffixes = ['py'] + suffixes += req.get_addhandler_exts().split() + if req.extension: + suffixes.append(req.extension[1:]) + + exp = '\\.' + '$|\\.'.join(suffixes) + '$' + suff_matcher = re.compile(exp) + module_name = suff_matcher.sub('',module_name) + + # Next need to determine the path for the function + # which will be called from 'req.path_info'. The + # leading slash and possibly any trailing slash are + # eliminated. There would normally be at most one + # trailing slash as Apache eliminates duplicates + # from the original URI. + + func_path = '' + + if req.path_info: + func_path = req.path_info[1:] + if func_path[-1:] == '/': + func_path = func_path[:-1] + + # Now determine the actual Python module code file + # to load. This will first try looking for the file + # '/path/.py'. If this doesn't exist, + # will try fallback of using the 'index' module, + # ie., look for '/path/index.py'. 
In doing this, the + # 'func_path' gets adjusted so the lead part is what + # 'module_name' was set to. + + req.filename = path + '/' + module_name + '.py' + + if not exists(req.filename): + if func_path: + func_path = module_name + '/' + func_path + else: + func_path = module_name + + module_name = 'index' + req.filename = path + '/' + module_name + '.py' + + if not exists(req.filename): + raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND) + + # Default to looking for the 'index' function if no + # function path definition was supplied. + + if not func_path: + func_path = 'index' + + # Turn slashes into dots. + + func_path = func_path.replace('/', '.') + + # Normalise req.filename to avoid Win32 issues. + + req.filename = normpath(req.filename) + + + # We use the page cache to load the module + module = page_cache[req] + + # does it have an __auth__? + realm, user, passwd = process_auth(req, module) + + # resolve the object ('traverse') + object = resolve_object(req, module, func_path, realm, user, passwd) + + # publish the object + published = publish_object(req, object) + + # we log a message if nothing was published, it helps with debugging + if (not published) and (req._bytes_queued==0) and (req.next is None): + log=int(req.get_config().get("PythonDebug", 0)) + if log: + req.log_error("mod_python.publisher: nothing to publish.") + + return apache.OK + +def process_auth(req, object, realm="unknown", user=None, passwd=None): + + found_auth, found_access = 0, 0 + + if hasattr(object, "__auth_realm__"): + realm = object.__auth_realm__ + + func_object = None + + if type(object) is FunctionType: + func_object = object + elif type(object) == types.MethodType: + func_object = object.__func__ + + if func_object: + # functions are a bit tricky + + func_code = func_object.__code__ + func_globals = func_object.__globals__ + + def lookup(name): + i = None + if name in func_code.co_names: + i = list(func_code.co_names).index(name) + elif func_code.co_argcount < 
len(func_code.co_varnames): + names = func_code.co_varnames[func_code.co_argcount:] + if name in names: + i = list(names).index(name) + if i is not None: + if PY2: + return (1, func_code.co_consts[i+1]) + else: + return (1, func_code.co_consts[1+i*2]) + return (0, None) + + (found_auth, __auth__) = lookup('__auth__') + if found_auth and type(__auth__) == types.CodeType: + __auth__ = types.FunctionType(__auth__, func_globals) + + (found_access, __access__) = lookup('__access__') + if found_access and type(__access__) == types.CodeType: + __access__ = types.FunctionType(__access__, func_globals) + + (found_realm, __auth_realm__) = lookup('__auth_realm__') + if found_realm: + realm = __auth_realm__ + + else: + if hasattr(object, "__auth__"): + __auth__ = object.__auth__ + found_auth = 1 + if hasattr(object, "__access__"): + __access__ = object.__access__ + found_access = 1 + + if found_auth or found_access: + # because ap_get_basic insists on making sure that AuthName and + # AuthType directives are specified and refuses to do anything + # otherwise (which is technically speaking a good thing), we + # have to do base64 decoding ourselves. 
+ # + # to avoid needless header parsing, user and password are parsed + # once and the are received as arguments + if not user and "Authorization" in req.headers_in: + try: + if PY2: + s = req.headers_in["Authorization"][6:] + s = base64.decodestring(s) + else: + s = req.headers_in["Authorization"][6:].encode() + s = base64.decodestring(s).decode() + user, passwd = s.split(":", 1) + except: + raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST) + + if found_auth: + + if not user: + # note that Opera supposedly doesn't like spaces around "=" below + s = 'Basic realm="%s"' % realm + req.err_headers_out["WWW-Authenticate"] = s + raise apache.SERVER_RETURN(apache.HTTP_UNAUTHORIZED) + + if _callable(__auth__): + rc = __auth__(req, user, passwd) + else: + if type(__auth__) is DictionaryType: + rc = user in __auth__ and __auth__[user] == passwd + else: + rc = __auth__ + + if not rc: + s = 'Basic realm = "%s"' % realm + req.err_headers_out["WWW-Authenticate"] = s + raise apache.SERVER_RETURN(apache.HTTP_UNAUTHORIZED) + + if found_access: + + if _callable(__access__): + rc = __access__(req, user) + else: + if type(__access__) in (list, tuple): + rc = user in __access__ + else: + rc = __access__ + + if not rc: + raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN) + + return realm, user, passwd + +### Those are the traversal and publishing rules ### + +# tp_rules is a dictionary, indexed by type, with tuple values. 
# The first item in the tuple is a boolean telling if the object can be traversed (default is True)
# The second item in the tuple is a boolean telling if the object can be published (default is True)
tp_rules = {}

# by default, built-in types cannot be traversed, but can be published
default_builtins_tp_rule = (False, True)
for t in list(types.__dict__.values()):
    if isinstance(t, type):
        tp_rules[t] = default_builtins_tp_rule

# those are the exceptions to the previous rules
tp_rules.update({
    # Those are not traversable nor publishable
    ModuleType          : (False, False),
    BuiltinFunctionType : (False, False),
    type                : (False, False),

    # Publishing a generator may not seem to make sense, because
    # it can only be done once. However, we could get a brand new generator
    # each time a new-style class property is accessed.
    GeneratorType       : (False, True),
})

if PY2:
    tp_rules.update({
        # This may change in the near future to (False, True)
        ClassType    : (False, False),
        TypeType     : (False, False),

        # Old-style instances are traversable
        InstanceType : (True, True),
    })

# types which are not referenced in the tp_rules dictionary will be
# traversable AND publishable
default_tp_rule = (True, True)

def resolve_object(req, obj, object_str, realm=None, user=None, passwd=None):
    """Traverse from module obj along the dot-separated path object_str.

    Each path component is resolved with getattr(). Traversal is refused
    (HTTP_FORBIDDEN) for components starting with an underscore and for
    intermediate objects whose type is marked non-traversable in
    tp_rules; a missing attribute yields HTTP_NOT_FOUND. Authentication
    (__auth__ / __access__ / __auth_realm__) is processed at every step
    via process_auth. The final object is additionally checked for
    publishability before being returned.
    """
    parts = object_str.split('.')

    first_object = True
    for obj_str in parts:
        # path components starting with an underscore are forbidden.
        # Use a slice rather than obj_str[0] so an empty component
        # (e.g. produced by a doubled '.') cannot raise an unhandled
        # IndexError (HTTP 500); it falls through to getattr() below
        # and results in a clean 404 instead.
        if obj_str[:1] == '_':
            req.log_error('Cannot traverse %s in %s because '
                          'it starts with an underscore'
                          % (obj_str, req.unparsed_uri), apache.APLOG_WARNING)
            raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)

        if first_object:
            first_object = False
        else:
            # if we're not in the first object (which is the module)
            # we're going to check whether we can traverse this type or not
            rule = tp_rules.get(type(obj), default_tp_rule)
            if not rule[0]:
                req.log_error('Cannot traverse %s in %s because '
                              '%s is not a traversable object'
                              % (obj_str, req.unparsed_uri, obj), apache.APLOG_WARNING)
                raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)

        # we know it's OK to call getattr
        # note that getattr can really call some code because
        # of property objects (or attributes with __get__ special methods)...
        try:
            obj = getattr(obj, obj_str)
        except AttributeError:
            raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND)

        # we process the authentication for the object
        realm, user, passwd = process_auth(req, obj, realm, user, passwd)

    # we're going to check if the final object is publishable
    rule = tp_rules.get(type(obj), default_tp_rule)
    if not rule[1]:

        req.log_error('Cannot publish %s in %s because '
                      '%s is not publishable'
                      % (obj_str, req.unparsed_uri, obj), apache.APLOG_WARNING)
        raise apache.SERVER_RETURN(apache.HTTP_FORBIDDEN)

    return obj

# This regular expression is used to test for the presence of an HTML header
# tag, written in upper or lower case.
+re_html = re.compile(r"\s*$",re.I) +re_charset = re.compile(r"charset\s*=\s*([^\s;]+)",re.I); + +def publish_object(req, obj): + if _callable(obj): + + # To publish callables, we call them and recursively publish the result + # of the call (as done by util.apply_fs_data) + + req.form = util.FieldStorage(req, keep_blank_values=1) + return publish_object(req, util.apply_fs_data(obj, req.form, req=req)) + +# TODO : we removed this as of mod_python 3.2, let's see if we can put it back +# in mod_python 3.3 +# elif hasattr(obj,'__iter__'): +# +# # To publish iterables, we recursively publish each item +# # This way, generators can be published +# result = False +# for item in obj: +# result |= publish_object(req,item) +# return result +# + else: + if obj is None: + + # Nothing to publish + return False + + elif PY2 and isinstance(obj, UnicodeType): + + # We've got an Unicode string to publish, so we have to encode + # it to bytes. We try to detect the character encoding + # from the Content-Type header + if req._content_type_set: + + charset = re_charset.search(req.content_type) + if charset: + charset = charset.group(1) + else: + # If no character encoding was set, we use UTF8 + charset = 'UTF8' + req.content_type += '; charset=UTF8' + + else: + # If no character encoding was set, we use UTF8 + charset = 'UTF8' + + result = obj.encode(charset) + else: + charset = None + result = str(obj) + + if not req._content_type_set: + # make an attempt to guess content-type + # we look for a ') + req.write('KeyValue\n') + for key in table: + req.write('%s%s\n'%( + key, + table[key] + )) + req.write('') + +def write_tree(req,tree,level): + for entry in tree: + if isinstance(entry,list): + write_tree(req,entry,level+1) + else: + req.write(' '*level) + req.write(' '.join(entry)) + req.write('\n') + +def handler(req): + req.form = util.FieldStorage(req) + + if req.form.getfirst('view_log'): + log = open(os.path.join(apache.server_root(),req.server.error_fname),'rb') + lines = 
bounded_buffer(100) + for line in log: + lines.append(line) + log.close() + req.content_type='text/plain' + for line in lines: + req.write(line) + return apache.OK + + req.add_common_vars() + req.content_type = 'text/html' + req.write('mod_python test page\n') + + req.write('

General information

\n') + req.write('\n') + req.write('\n'%( + 'Apache version', + req.subprocess_env.get('SERVER_SOFTWARE') + )) + req.write('\n'%( + 'Apache threaded MPM', + ( + apache.mpm_query(apache.AP_MPMQ_IS_THREADED) and + 'Yes, maximum %i threads / process'% + apache.mpm_query(apache.AP_MPMQ_MAX_THREADS) + ) or 'No (single thread MPM)' + )) + req.write('\n'%( + 'Apache forked MPM', + ( + apache.mpm_query(apache.AP_MPMQ_IS_FORKED) and + 'Yes, maximum %i processes'% + apache.mpm_query(apache.AP_MPMQ_MAX_DAEMONS) + ) or 'No (single process MPM)' + )) + req.write('\n'%( + 'Apache server root', + apache.server_root() + )) + req.write('\n'%( + 'Apache document root', + req.document_root() + )) + if req.server.error_fname: + req.write('\n'%( + 'Apache error log', + os.path.join(apache.server_root(),req.server.error_fname) + )) + else: + req.write('\n'%( + 'Apache error log', + 'None' + )) + req.write('\n'%( + 'Python sys.version', + sys.version + )) + req.write('\n'%( + 'Python sys.path', + '\n'.join(sys.path) + )) + req.write('\n'%( + 'Python interpreter name', + req.interpreter + )) + req.write('\n') + req.write('\n') + req.write('
%s%s
%s%s
%s%s
%s%s
%s%s
%s%s (view last 100 lines)
%s%s
%s%s
%s
%s
%s%s
mod_python.publisher available') + try: + from mod_python import publisher + req.write('Yes') + except: + req.write('No') + req.write('
mod_python.psp available') + try: + from mod_python import psp + req.write('Yes') + except: + req.write('No') + req.write('
\n') + + req.write('

Request input headers

\n') + write_table(req,req.headers_in) + + req.write('

Request environment

\n') + write_table(req,req.subprocess_env) + + req.write('

Request configuration

\n') + write_table(req,req.get_config()) + + req.write('

Request options

\n') + write_table(req,req.get_options()) + + req.write('

Request notes

\n') + write_table(req,req.notes) + + req.write('

Server configuration

\n') + write_table(req,req.server.get_config()) + + req.write('

Server options

\n') + write_table(req,req.server.get_options()) + + req.write('

Server configuration tree

\n
')
+    write_tree(req,apache.config_tree(),0)
+    req.write('
\n') + + req.write('') + return apache.OK diff --git a/lib/python/mod_python/util.py b/lib/python/mod_python/util.py new file mode 100644 index 0000000..b1d1d4b --- /dev/null +++ b/lib/python/mod_python/util.py @@ -0,0 +1,668 @@ + # vim: set sw=4 expandtab : + # + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); you + # may not use this file except in compliance with the License. You + # may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + # implied. See the License for the specific language governing + # permissions and limitations under the License. + # + # Originally developed by Gregory Trubetskoy. + # + +import _apache + +import sys +PY2 = sys.version[0] == '2' +if PY2: + import apache + from exceptions import * +else: + from . import apache + +from io import BytesIO +import tempfile +import re + +from types import * +import collections + +MethodWrapper = type(object.__call__) + +parse_qs = _apache.parse_qs +parse_qsl = _apache.parse_qsl + +# Maximum line length for reading. (64KB) +# Fixes memory error when upload large files such as 700+MB ISOs. +readBlockSize = 65368 + +""" The classes below are a (almost) a drop-in replacement for the + standard cgi.py FieldStorage class. They should have pretty much the + same functionality. + + These classes differ in that unlike cgi.FieldStorage, they are not + recursive. The class FieldStorage contains a list of instances of + Field class. Field class is incapable of storing anything in it. 
+ + These objects should be considerably faster than the ones in cgi.py + because they do not expect CGI environment, and are + optimized specifically for Apache and mod_python. +""" + +class Field: + def __init__(self, name, *args, **kwargs): + self.name = name + + # Some third party packages such as Trac create + # instances of the Field object and insert it + # directly into the list of form fields. To + # maintain backward compatibility check for + # where more than just a field name is supplied + # and invoke an additional initialisation step + # to process the arguments. Ideally, third party + # code should use the add_field() method of the + # form, but if they need to maintain backward + # compatibility with older versions of mod_python + # they will not have a choice but to use old + # way of doing things and thus we need this code + # for the forseeable future to cope with that. + + if args or kwargs: + self.__bc_init__(*args, **kwargs) + + def __bc_init__(self, file, ctype, type_options, + disp, disp_options, headers = {}): + self.file = file + self.type = ctype + self.type_options = type_options + self.disposition = disp + self.disposition_options = disp_options + if "filename" in disp_options: + self.filename = disp_options["filename"] + else: + self.filename = None + self.headers = headers + + def __repr__(self): + """Return printable representation.""" + return "Field(%s, %s)" % (repr(self.name), repr(self.value)) + + def __getattr__(self, name): + if name != 'value': + raise AttributeError(name) + if self.file: + self.file.seek(0) + value = self.file.read() + if not isinstance(value, bytes): + raise TypeError("value must be bytes, is file opened in binary mode?") + self.file.seek(0) + else: + value = None + return value + + def __del__(self): + self.file.close() + +if PY2: + class StringField(str): + """ This class is basically a string with + added attributes for compatibility with std lib cgi.py. 
Basically, this + works the opposite of Field, as it stores its data in a string, but creates + a file on demand. Field creates a value on demand and stores data in a file. + """ + filename = None + headers = {} + ctype = "text/plain" + type_options = {} + disposition = None + disp_options = None + + # I wanted __init__(name, value) but that does not work (apparently, you + # cannot subclass str with a constructor that takes >1 argument) + def __init__(self, value): + '''Create StringField instance. You'll have to set name yourself.''' + str.__init__(self, value) + self.value = value + + def __getattr__(self, name): + if name != 'file': + raise AttributeError(name) + self.file = BytesIO(self.value) + return self.file + + def __repr__(self): + """Return printable representation (to pass unit tests).""" + return "Field(%s, %s)" % (repr(self.name), repr(self.value)) +else: + class StringField(bytes): + """ This class is basically a string with + added attributes for compatibility with std lib cgi.py. Basically, this + works the opposite of Field, as it stores its data in a string, but creates + a file on demand. Field creates a value on demand and stores data in a file. 
+ """ + filename = None + headers = {} + ctype = "text/plain" + type_options = {} + disposition = None + disp_options = None + + def __new__(self, value): + return bytes.__new__(self, value) + + def __init__(self, value): + self.value = value + + def __getattr__(self, name): + if name != 'file': + raise AttributeError(name) + self.file = BytesIO(self.value) + return self.file + + def __repr__(self): + """Return printable representation (to pass unit tests).""" + return "Field(%s, %s)" % (repr(self.name), repr(self.value)) + +class FieldList(list): + + def __init__(self): + self.__table = None + list.__init__(self) + + def table(self): + if self.__table is None: + self.__table = {} + for item in self: + if item.name in self.__table: + self.__table[item.name].append(item) + else: + self.__table[item.name] = [item] + return self.__table + + def __delitem__(self, *args): + self.__table = None + return list.__delitem__(self, *args) + + def __delslice__(self, *args): + self.__table = None + return list.__delslice__(self, *args) + + def __iadd__(self, *args): + self.__table = None + return list.__iadd__(self, *args) + + def __imul__(self, *args): + self.__table = None + return list.__imul__(self, *args) + + def __setitem__(self, *args): + self.__table = None + return list.__setitem__(self, *args) + + def __setslice__(self, *args): + self.__table = None + return list.__setslice__(self, *args) + + def append(self, *args): + self.__table = None + return list.append(self, *args) + + def extend(self, *args): + self.__table = None + return list.extend(self, *args) + + def insert(self, *args): + self.__table = None + return list.insert(self, *args) + + def pop(self, *args): + self.__table = None + return list.pop(self, *args) + + def remove(self, *args): + self.__table = None + return list.remove(self, *args) + + +class FieldStorage: + + def __init__(self, req, keep_blank_values=0, strict_parsing=0, file_callback=None, field_callback=None): + # + # Whenever readline is called 
ALWAYS use the max size EVEN when + # not expecting a long line. - this helps protect against + # malformed content from exhausting memory. + # + + self.list = FieldList() + + # always process GET-style parameters + if req.args: + pairs = parse_qsl(req.args, keep_blank_values) + for pair in pairs: + self.add_field(pair[0], pair[1]) + + if req.method != "POST": + return + + try: + clen = int(req.headers_in["content-length"]) + except (KeyError, ValueError): + # absent content-length is not acceptable + raise apache.SERVER_RETURN(apache.HTTP_LENGTH_REQUIRED) + + if "content-type" not in req.headers_in: + ctype = b"application/x-www-form-urlencoded" + else: + ctype = req.headers_in["content-type"].encode("latin1") + + if not isinstance(ctype, bytes): + raise TypeError("ctype must be of type bytes") + + if ctype.startswith(b"application/x-www-form-urlencoded"): + v = req.read(clen) + if not isinstance(v, bytes): + raise TypeError("req.read() must return bytes") + pairs = parse_qsl(v, keep_blank_values) + for pair in pairs: + self.add_field(pair[0], pair[1]) + return + + if not ctype.startswith(b"multipart/"): + # we don't understand this content-type + raise apache.SERVER_RETURN(apache.HTTP_NOT_IMPLEMENTED) + + # figure out boundary + try: + i = ctype.lower().rindex(b"boundary=") + boundary = ctype[i+9:] + if len(boundary) >= 2 and boundary[:1] == boundary[-1:] == b'"': + boundary = boundary[1:-1] + boundary = re.compile(b"--" + re.escape(boundary) + b"(--)?\r?\n") + + except ValueError: + raise apache.SERVER_RETURN(apache.HTTP_BAD_REQUEST) + + # read until boundary + self.read_to_boundary(req, boundary, None) + + end_of_stream = False + while not end_of_stream: + ## parse headers + + ctype, type_options = b"text/plain", {} + disp, disp_options = None, {} + headers = apache.make_table() + + line = req.readline(readBlockSize) + if not isinstance(line, bytes): + raise TypeError("req.readline() must return bytes") + match = boundary.match(line) + if (not line) or match: + 
# we stop if we reached the end of the stream or a stop + # boundary (which means '--' after the boundary) we + # continue to the next part if we reached a simple + # boundary in either case this would mean the entity is + # malformed, but we're tolerating it anyway. + end_of_stream = (not line) or (match.group(1) is not None) + continue + + skip_this_part = False + while line not in (b'\r',b'\r\n'): + nextline = req.readline(readBlockSize) + while nextline and nextline[:1] in [ b' ', b'\t']: + line = line + nextline + nextline = req.readline(readBlockSize) + # we read the headers until we reach an empty line + # NOTE : a single \n would mean the entity is malformed, but + # we're tolerating it anyway + h, v = line.split(b":", 1) + headers.add(h, v) # mp_table accepts bytes, but always returns str + h = h.lower() + if h == b"content-disposition": + disp, disp_options = parse_header(v) + elif h == b"content-type": + ctype, type_options = parse_header(v) + # + # NOTE: FIX up binary rubbish sent as content type + # from Microsoft IE 6.0 when sending a file which + # does not have a suffix. + # + if ctype.find(b'/') == -1: + ctype = b'application/octet-stream' + + line = nextline + match = boundary.match(line) + if (not line) or match: + # we stop if we reached the end of the stream or a + # stop boundary (which means '--' after the + # boundary) we continue to the next part if we + # reached a simple boundary in either case this + # would mean the entity is malformed, but we're + # tolerating it anyway. + skip_this_part = True + end_of_stream = (not line) or (match.group(1) is not None) + break + + if skip_this_part: + continue + + if b"name" in disp_options: + name = disp_options[b"name"] + else: + name = None + + # create a file object + # is this a file? 
+ filename = None + if b"filename" in disp_options: + filename = disp_options[b"filename"] + if file_callback and isinstance(file_callback, collections.Callable): + file = file_callback(filename) + else: + file = tempfile.TemporaryFile("w+b") + else: + if field_callback and isinstance(field_callback, collections.Callable): + file = field_callback() + else: + file = BytesIO() + + # read it in + self.read_to_boundary(req, boundary, file) + file.seek(0) + + # make a Field + if filename: + field = Field(name) + field.filename = filename + else: + field = StringField(file.read()) + field.name = name + field.file = file + field.type = PY2 and ctype or ctype.decode('latin1') + field.type_options = type_options + field.disposition = PY2 and disp or disp.decode('latin1') + field.disposition_options = disp_options + field.headers = headers + self.list.append(field) + + def add_field(self, key, value): + """Insert a field as key/value pair""" + item = StringField(value) + item.name = key + self.list.append(item) + + def __setitem__(self, key, value): + if not isinstance(value, bytes): + raise TypeError("Field value must be bytes") + if not isinstance(key, bytes): + raise TypeError("Field key must be bytes") + table = self.list.table() + if key in table: + items = table[key] + for item in items: + self.list.remove(item) + item = StringField(value) + item.name = key + self.list.append(item) + + def read_to_boundary(self, req, boundary, file): + previous_delimiter = None + while True: + line = req.readline(readBlockSize) + if not isinstance(line, bytes): + raise TypeError("req.readline() must return bytes") + + if not line: + # end of stream + if file is not None and previous_delimiter is not None: + file.write(previous_delimiter) + return True + + match = boundary.match(line) + if match: + # the line is the boundary, so we bail out + # if the two last bytes are '--' it is the end of the entity + return match.group(1) is not None + + if line[-2:] == b'\r\n': + # the line ends 
with a \r\n, which COULD be part + # of the next boundary. We write the previous line delimiter + # then we write the line without \r\n and save it for the next + # iteration if it was not part of the boundary + if file is not None: + if previous_delimiter is not None: file.write(previous_delimiter) + file.write(line[:-2]) + previous_delimiter = b'\r\n' + + elif line[-1:] == b'\r': + # the line ends with \r, which is only possible if + # readBlockSize bytes have been read. In that case the + # \r COULD be part of the next boundary, so we save it + # for the next iteration + assert len(line) == readBlockSize + if file is not None: + if previous_delimiter is not None: file.write(previous_delimiter) + file.write(line[:-1]) + previous_delimiter = b'\r' + + elif line == b'\n' and previous_delimiter == b'\r': + # the line us a single \n and we were in the middle of a \r\n, + # so we complete the delimiter + previous_delimiter = b'\r\n' + + else: + if file is not None: + if previous_delimiter is not None: file.write(previous_delimiter) + file.write(line) + previous_delimiter = None + + def __getitem__(self, key): + """Dictionary style indexing.""" + found = self.list.table()[key] + if len(found) == 1: + return found[0] + else: + return found + + def get(self, key, default): + try: + return self.__getitem__(key) + except (TypeError, KeyError): + return default + + def keys(self): + """Dictionary style keys() method.""" + return list(self.list.table().keys()) + + def __iter__(self): + return iter(list(self.keys())) + + def __repr__(self): + return repr(self.list.table()) + + def has_key(self, key): + """Dictionary style has_key() method.""" + return (key in self.list.table()) + + __contains__ = has_key + + def __len__(self): + """Dictionary style len(x) support.""" + return len(self.list.table()) + + def getfirst(self, key, default=None): + """ return the first value received """ + try: + return self.list.table()[key][0] + except KeyError: + return default + + def 
getlist(self, key): + """ return a list of received values """ + try: + return self.list.table()[key] + except KeyError: + return [] + + def items(self): + """Dictionary-style items(), except that items are returned in the same + order as they were supplied in the form.""" + return [(item.name, item) for item in self.list] + + def __delitem__(self, key): + table = self.list.table() + values = table[key] + for value in values: + self.list.remove(value) + + def clear(self): + self.list = FieldList() + + +def parse_header(line): + """Parse a Content-type like header. + + Return the main content-type and a dictionary of options. + + """ + + if not isinstance(line, bytes): + raise TypeError("parse_header() only accepts bytes") + + plist = [a.strip() for a in line.split(b';')] + key = plist[0].lower() + del plist[0] + pdict = {} + for p in plist: + i = p.find(b'=') + if i >= 0: + name = p[:i].strip().lower() + value = p[i+1:].strip() + if len(value) >= 2 and value[:1] == value[-1:] == b'"': + value = value[1:-1] + pdict[name] = PY2 and value or value.decode('latin1') + return key, pdict + +def apply_fs_data(object, fs, **args): + """ + Apply FieldStorage data to an object - the object must be + callable. Examine the args, and match then with fs data, + then call the object, return the result. + """ + + # we need to weed out unexpected keyword arguments + # and for that we need to get a list of them. 
There + # are a few options for callable objects here: + + fc = None + expected = [] + + if PY2: + if hasattr(object, "func_code"): + # function + fc = object.func_code + expected = fc.co_varnames[0:fc.co_argcount] + elif hasattr(object, 'im_func'): + # method + fc = object.im_func.func_code + expected = fc.co_varnames[1:fc.co_argcount] + elif type(object) in (TypeType,ClassType) and hasattr(object, "__init__"): + # class + fc = object.__init__.im_func.func_code + expected = fc.co_varnames[1:fc.co_argcount] + elif type(object) is BuiltinFunctionType: + # builtin + fc = None + expected = [] + elif hasattr(object, '__call__'): + # callable object + if type(object.__call__) is MethodType: + fc = object.__call__.im_func.func_code + expected = fc.co_varnames[1:fc.co_argcount] + else: + # abuse of objects to create hierarchy + return apply_fs_data(object.__call__, fs, **args) + else: + if hasattr(object, '__code__'): + # function + fc = object.__code__ + expected = fc.co_varnames[0:fc.co_argcount] + elif hasattr(object, '__func__'): + # method + fc = object.__func__.__code__ + expected = fc.co_varnames[1:fc.co_argcount] + elif type(object) is type and hasattr(object, "__init__") and hasattr(object.__init__, "__code__"): + # class + fc = object.__init__.__code__ + expected = fc.co_varnames[1:fc.co_argcount] + elif type(object) is [type, BuiltinFunctionType]: + # builtin + fc = None + expected = [] + elif hasattr(object, '__call__'): + # callable object + if type(object.__call__) is MethodType: + fc = object.__call__.__func__.__code__ + expected = fc.co_varnames[1:fc.co_argcount] + elif type(object.__call__) != MethodWrapper: + # abuse of objects to create hierarchy + return apply_fs_data(object.__call__, fs, **args) + + # add form data to args + for field in fs.list: + if field.filename: + val = field + else: + val = field.value + args.setdefault(field.name, []).append(val) + + # replace lists with single values + for arg in args: + if ((type(args[arg]) is list) and + 
(len(args[arg]) == 1)): + args[arg] = args[arg][0] + + # remove unexpected args unless co_flags & 0x08, + # meaning function accepts **kw syntax + if fc is None: + args = {} + elif not (fc.co_flags & 0x08): + for name in list(args.keys()): + if name not in expected: + del args[name] + + return object(**args) + +def redirect(req, location, permanent=0, text=None): + """ + A convenience function to provide redirection + """ + + if not isinstance(location, str): + raise TypeError("location must be of type str") + + if req.sent_bodyct: + raise IOError("Cannot redirect after headers have already been sent.") + + req.err_headers_out["Location"] = location + if permanent: + req.status = apache.HTTP_MOVED_PERMANENTLY + else: + req.status = apache.HTTP_MOVED_TEMPORARILY + + if text is None: + req.write('

The document has moved' + ' here

\n' + % location) + else: + req.write(text) + + raise apache.SERVER_RETURN(apache.DONE) diff --git a/lib/python/mod_python/wsgi.py b/lib/python/mod_python/wsgi.py new file mode 100644 index 0000000..dbd6608 --- /dev/null +++ b/lib/python/mod_python/wsgi.py @@ -0,0 +1,80 @@ + # + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); you + # may not use this file except in compliance with the License. You + # may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + # implied. See the License for the specific language governing + # permissions and limitations under the License. + # + # Originally developed by Gregory Trubetskoy. + # + +import sys +from mod_python import apache + +def handler(req): + + options = req.get_options() + + ## Find the application callable + + app = None + app_str = options['mod_python.wsgi.application'] + if app_str: + if '::' in app_str: + mod_str, callable_str = app_str.split('::', 1) + else: + mod_str, callable_str = app_str, 'application' + config = req.get_config() + autoreload, log = True, False + if "PythonAutoReload" in config: + autoreload = config["PythonAutoReload"] == "1" + if "PythonDebug" in config: + log = config["PythonDebug"] == "1" + module = apache.import_module(mod_str, autoreload=autoreload, log=log) + + try: + app = module.__dict__[callable_str] + except KeyError: pass + + if not app: + req.log_error( + 'WSGI handler: mod_python.wsgi.application (%s) not found, declining.' 
+ % repr(app_str), apache.APLOG_WARNING) + return apache.DECLINED + + ## Build env + + env = req.build_wsgi_env() + if env is None: + # None means base_uri mismatch. The problem can be either + # base_uri or Location, but because we couldn't be here if it + # was Location, then it must be mod_python.wsgi.base_uri. + base_uri = options.get('mod_python.wsgi.base_uri') + req.log_error( + "WSGI handler: req.uri (%s) does not start with mod_python.wsgi.base_uri (%s), declining." + % (repr(req.uri), repr(base_uri)), apache.APLOG_WARNING) + return apache.DECLINED + + ## Run the app + + response = None + try: + response = app(env, req.wsgi_start_response) + [req.write(token) for token in response] + finally: + # call close() if there is one + if type(response) not in (list, tuple): + getattr(response, 'close', lambda: None)() + + return apache.OK + + diff --git a/scripts/Makefile.in b/scripts/Makefile.in new file mode 100644 index 0000000..5519a2b --- /dev/null +++ b/scripts/Makefile.in @@ -0,0 +1,29 @@ + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + # + # Originally developed by Gregory Trubetskoy. 
+ # + +INSTALL=@INSTALL@ +BINDIR=@prefix@/bin + +clean: + rm -rf *~ + +distclean: clean + rm -f Makefile + +install: + $(INSTALL) -m 0755 mod_python $(DESTDIR)$(BINDIR)/mod_python diff --git a/scripts/mod_python.in b/scripts/mod_python.in new file mode 100644 index 0000000..3c1a7b3 --- /dev/null +++ b/scripts/mod_python.in @@ -0,0 +1,167 @@ +#!@PYTHON_BIN@ + + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); you + # may not use this file except in compliance with the License. You + # may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + # implied. See the License for the specific language governing + # permissions and limitations under the License. + # + # Originally developed by Gregory Trubetskoy. + + + # WARNING: + # WARNING: Make sure you're editing mod_python.in, not mod_python! 
+ # WARNING: + + +import sys +import os +import platform +import StringIO +import mod_python +from mod_python import httpdconf + + +def cmd_start(): + parser = OptionParser(usage="%prog start \n" + " Start Apache using config file ") + (options, args) = parser.parse_args(sys.argv[2:]) + if len(args) != 1: + parser.error("Must specify ") + os.execl(mod_python.version.HTTPD, mod_python.version.HTTPD, '-f', args[0], '-k', 'start') + +def cmd_stop(): + parser = OptionParser(usage="%prog start \n" + " Stop Apache using config file ") + (options, args) = parser.parse_args(sys.argv[2:]) + if len(args) != 1: + parser.error("Must specify ") + os.execl(mod_python.version.HTTPD, mod_python.version.HTTPD, '-f', args[0], '-k', 'graceful-stop') + +def cmd_restart(): + parser = OptionParser(usage="%prog start \n" + " Restart Apache using config file ") + (options, args) = parser.parse_args(sys.argv[2:]) + if len(args) != 1: + parser.error("Must specify ") + os.execl(mod_python.version.HTTPD, mod_python.version.HTTPD, '-f', args[0], '-k', 'graceful') + +def cmd_genconfig(): + + parser = OptionParser(usage="%prog genconfig > \n" + " Run the config generation script ") + + (options, args) = parser.parse_args(sys.argv[2:]) + if len(args) != 1: + parser.error("Must specify ") + + execfile(args[0]) + +def cmd_create(): + + parser = OptionParser(usage="%prog create \n" + " Create a mod_python skeleton in ") + parser.add_option("--listen", action="store", type="string", dest="listen", default="8888") + parser.add_option("--pythonpath", action="store", type="string", dest="pythonpath", default="") + parser.add_option("--pythonhandler", action="store", type="string", dest="pythonhandler", default=None) + parser.add_option("--pythonoption", action="append", type="string", dest="pythonoptions", default=[]) + + (options, args) = parser.parse_args(sys.argv[2:]) + + if len(args) != 1: + parser.error("Must specify ") + + if not options.pythonhandler: + parser.error("Must specify a 
--pythonhandler") + + dest = args[0] + + pythonpath = options.pythonpath.split(":") + if options.pythonhandler == 'mod_python.wsgi': + mp_comments = ['PythonOption mod_python.wsgi.base_url = ""'] + conf_path = mod_python.httpdconf.write_basic_config(dest, listen=options.listen, pythonhandler=options.pythonhandler, + pythonpath=pythonpath, pythonoptions=options.pythonoptions, + mp_comments=mp_comments) + if conf_path: + print "\nCreated! Please look over %s." % `conf_path` + print "Remember to generate the Apache httpd config by running" + print "%s genconfig %s > %s" % (sys.argv[0], conf_path, + os.path.join(os.path.split(conf_path)[0], 'httpd.conf')) + print "From here on you can tweak %s and re-generate Apache config at any time." % `conf_path` + +def cmd_version(): + + parser = OptionParser(usage="%prog version\n" + " Print version") + + version = "\n" + version += "mod_python: %s\n" % mod_python.mp_version + version += " %s\n\n" % `os.path.join(mod_python.version.LIBEXECDIR, "mod_python.so")` + version += "python: %s\n" % ''.join(sys.version.splitlines()) + version += " %s\n\n" % `mod_python.version.PYTHON_BIN` + version += "httpd: %s\n" % mod_python.version.HTTPD_VERSION + version += " %s\n\n" % `mod_python.version.HTTPD` + version += "apr: %s\n" % mod_python.version.APR_VERSION + version += "platform: %s\n" % platform.platform() + + print version + +import optparse + +class OptionParser (optparse.OptionParser): + + def check_required (self, opt): + option = self.get_option(opt) + + # Assumes the option's 'default' is set to None! 
+ if getattr(self.values, option.dest) is None: + self.error("%s option not supplied" % option) + + +def main(): + + module = sys.modules[__name__] + commands = [c[4:] for c in dir(module) if c.startswith("cmd_")] + + parser = OptionParser(usage = "%%prog [command options]\n" + " Where is one of: %s\n" + " For help on a specific command, use: %%prog --help\n" + % " ".join(commands)) + + # anything after a command is not our argument + try: + cmd_idx = [sys.argv.index(arg) for arg in sys.argv if arg in commands][0] + except IndexError: + cmd_idx = 1 + + (options, args) = parser.parse_args(sys.argv[1:cmd_idx+1]) + + if not args: + parser.error("Please specify a command") + + command = args[0] + + if command not in commands: + parser.error("Invalid command: %s" % command) + + cmd_func = module.__dict__["cmd_"+command] + cmd_func() + +if __name__ == "__main__": + main() + +# makes emacs go into python mode +### Local Variables: +### mode:python +### End: + + diff --git a/src/.tableobject.c.swp b/src/.tableobject.c.swp new file mode 100644 index 0000000..183e669 Binary files /dev/null and b/src/.tableobject.c.swp differ diff --git a/src/Makefile.in b/src/Makefile.in new file mode 100644 index 0000000..c3b3fb2 --- /dev/null +++ b/src/Makefile.in @@ -0,0 +1,90 @@ + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+ # + # Originally developed by Gregory Trubetskoy. + # + +CC=@CC@ +AR=@AR@ +APXS=@APXS@ +MKDEP=@MKDEP@ + +# requires flex 2.5.31 for reentrant support +LEX=@LEX@ +INCLUDES=@INCLUDES@ +CPPFLAGS=@CPPFLAGS@ +CFLAGS=@CFLAGS@ +LDLIBS=@LDLIBS@ +LDFLAGS=@LDFLAGS@ +srcdir=. + +SRCS= mod_python.c _apachemodule.c requestobject.c tableobject.c util.c \ + serverobject.c connobject.c filterobject.c hlist.c \ + hlistobject.c finfoobject.c version.c \ + include/_apachemodule.h include/filterobject.h include/hlist.h \ + include/mod_python.h include/psp_flex.h include/psp_parser.h \ + include/requestobject.h include/tableobject.h include/connobject.h \ + include/finfoobject.h include/hlistobject.h include/mp_version.h \ + include/_pspmodule.h include/psp_string.h include/serverobject.h \ + include/util.h + +.PHONY: version + +all: @ALL@ + +psp_parser.c: psp_parser.l + @rm -f psp_parser.c + $(LEX) -R -opsp_parser.c --header-file=include/psp_flex.h psp_parser.l + +dso: mod_python.so + +mod_python.so: $(SRCS) @SOLARIS_HACKS@ + @echo + @echo 'Building mod_python.so.' 
+ @echo + $(APXS) $(INCLUDES) $(CPPFLAGS) $(CFLAGS) -c $(SRCS) $(LDFLAGS) $(LDLIBS) @SOLARIS_HACKS@ + @rm -f mod_python.so + @ln -s .libs/mod_python.so mod_python.so +clean: + rm -rf $(OBJS) version.c core libpython.a mod_python.so *~ .libs *.o *.slo *.lo *.la + +distclean: clean + rm -f Makefile .depend + +version.c: + @MP_GIT_SHA=$$(git describe --always); \ + echo > version.c ; \ + echo "/* THIS FILE IS AUTO-GENERATED BY Makefile */" >> version.c ; \ + echo "#include \"mp_version.h\"" >> version.c ; \ + echo "const char * const mp_git_sha = \"$${MP_GIT_SHA}\";" >> version.c ; \ + echo "const int mp_version_major = MP_VERSION_MAJOR;" >> version.c ; \ + echo "const int mp_version_minor = MP_VERSION_MINOR;" >> version.c ; \ + echo "const int mp_version_patch = MP_VERSION_PATCH;" >> version.c ; \ + echo "const char * const mp_version_string = MP_VERSION_STRING(MP_VERSION_MAJOR,MP_VERSION_MINOR,MP_VERSION_PATCH) \"-$${MP_GIT_SHA}\";" >> version.c ; \ + echo "const char * const mp_version_component = \"mod_python/\" MP_VERSION_STRING(MP_VERSION_MAJOR,MP_VERSION_MINOR,MP_VERSION_PATCH) \"-$${MP_GIT_SHA}\";" >> version.c + +# echo "const char * const mp_version_component = \"mod_python/\" #MP_VERSION_MAJOR \".\" #MP_VERSION_MINOR \".\" #MP_VERSION_PATCH \"-$${MP_GIT_SHA}\";" >> version.c + +# this is a hack to help avoid a gcc/solaris problem +# python uses assert() which needs _eprintf(). 
See +# SOLARIS_HACKS above +_eprintf.o: + ar -x `gcc -print-libgcc-file-name` _eprintf.o +_floatdidf.o: + ar -x `gcc -print-libgcc-file-name` _floatdidf.o +_muldi3.o: + ar -x `gcc -print-libgcc-file-name` _muldi3.o + +# DO NOT DELETE THIS LINE diff --git a/src/UpgradeLog.XML b/src/UpgradeLog.XML new file mode 100644 index 0000000..db095d3 --- /dev/null +++ b/src/UpgradeLog.XML @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/Version.rc b/src/Version.rc new file mode 100644 index 0000000..f1cbf33 --- /dev/null +++ b/src/Version.rc @@ -0,0 +1,64 @@ +#define APSTUDIO_READONLY_SYMBOLS +#include "afxres.h" +#include "include\mp_version.h" +#undef APSTUDIO_READONLY_SYMBOLS + +///////////////////////////////////////////////////////////////////////////// +// English (U.S.) resources + +#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU) +#ifdef _WIN32 +LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US +#pragma code_page(1252) +#endif //_WIN32 + +#ifndef _MAC + +///////////////////////////////////////////////////////////////////////////// +// +// Version +// + +VS_VERSION_INFO VERSIONINFO + FILEVERSION MP_VERSION_MAJOR,MP_VERSION_MINOR,MP_VERSION_PATCH + PRODUCTVERSION MP_VERSION_MAJOR,MP_VERSION_MINOR,MP_VERSION_PATCH + FILEFLAGSMASK 0x3fL +#ifdef _DEBUG + FILEFLAGS 0x1L +#else + FILEFLAGS 0x0L +#endif + FILEOS 0x40004L + FILETYPE 0x1L + FILESUBTYPE 0x0L +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904b0" + BEGIN + VALUE "Comments", "Mod_python allows embedding Python within the Apache http server for a considerable boost in performance and added flexibility in designing web based applications. 
\0" + VALUE "CompanyName", "\0" + VALUE "FileDescription", "Embedding Python within Apache.\0" + VALUE "FileVersion", MP_VERSION_STRING "\0" + VALUE "InternalName", "mod_python\0" + VALUE "LegalCopyright", "Copyright © 2000-2002 Apache Software Foundation.\0" + VALUE "LegalTrademarks", "\0" + VALUE "OriginalFilename", "mod_python\0" + VALUE "PrivateBuild", "\0" + VALUE "ProductName", "mod_python\0" + VALUE "ProductVersion", MP_VERSION_STRING "\0" + VALUE "SpecialBuild", "\0" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x409, 1200 + END +END + +#endif // !_MAC + +#endif // English (U.S.) resources + +///////////////////////////////////////////////////////////////////////////// + diff --git a/src/_apachemodule.c b/src/_apachemodule.c new file mode 100644 index 0000000..1f273df --- /dev/null +++ b/src/_apachemodule.c @@ -0,0 +1,924 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. 
+ * + * + * _apachemodule.c + * + * + */ + +#include "mod_python.h" + +/* A referende to the _apache.SERVER_RETURN */ + +PyObject *Mp_ServerReturn; + +/** + ** mp_log_error + ** + * A wrapper to ap_log_error + * + * mp_log_error(string message, int level, server server) + * + */ + +static PyObject * mp_log_error(PyObject *self, PyObject *args) +{ + + int level = 0; + char *message = NULL; + serverobject *server = NULL; + server_rec *serv_rec; + + if (! PyArg_ParseTuple(args, "z|iO", &message, &level, &server)) + return NULL; /* error */ + + if (message) { + + if (! level) + level = APLOG_ERR; + + if (!server || (PyObject *)server == Py_None) + serv_rec = NULL; + else { + if (! MpServer_Check(server)) { + PyErr_BadArgument(); + return NULL; + } + serv_rec = server->server; + } + Py_BEGIN_ALLOW_THREADS + ap_log_error(APLOG_MARK, level, 0, serv_rec, "%s", message); + Py_END_ALLOW_THREADS + } + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** parse_qs + ** + * This is a C version of cgi.parse_qs + */ + +static PyObject *parse_qs(PyObject *self, PyObject *args) +{ + + PyObject *pairs, *dict; + int i, n, len, lsize; + char *qs; + PyObject *qso; + char unicode = 0; + int keep_blank_values = 0; + int strict_parsing = 0; /* XXX not implemented */ + + if (! 
PyArg_ParseTuple(args, "O|ii", &qso, &keep_blank_values, + &strict_parsing)) + return NULL; /* error */ + + if (PyUnicode_Check(qso)) + unicode = 1; + + MP_ANYSTR_AS_STR(qs, qso, 1); + if (!qs) { + Py_DECREF(qso); /* MP_ANYSTR_AS_STR */ + return NULL; + } + + /* split query string by '&' and ';' into a list of pairs */ + pairs = PyList_New(0); + if (pairs == NULL) { + Py_DECREF(qso); + return NULL; + } + + i = 0; + len = strlen(qs); + + while (i < len) { + + PyObject *pair; + char *cpair; + int j = 0; + + pair = PyBytes_FromStringAndSize(NULL, len); + if (pair == NULL) { + Py_DECREF(qso); + return NULL; + } + + /* split by '&' or ';' */ + cpair = PyBytes_AS_STRING(pair); + while ((qs[i] != '&') && (qs[i] != ';') && (i < len)) { + /* replace '+' with ' ' */ + cpair[j] = (qs[i] == '+') ? ' ' : qs[i]; + i++; + j++; + } + + if (j) { + _PyBytes_Resize(&pair, j); + if (pair) + PyList_Append(pairs, pair); + } + + Py_XDECREF(pair); + i++; + } + + Py_DECREF(qso); /* MP_ANYSTR_AS_STR */ + + /* + * now we have a list of "abc=def" string (pairs), let's split + * them all by '=' and put them in a dictionary. 
+ */ + + dict = PyDict_New(); + if (dict == NULL) + return NULL; + + lsize = PyList_Size(pairs); + n = 0; + + while (n < lsize) { + + PyObject *pair, *key, *val; + char *cpair, *ckey, *cval; + int k, v; + + pair = PyList_GET_ITEM(pairs, n); + cpair = PyBytes_AS_STRING(pair); + + len = strlen(cpair); + key = PyBytes_FromStringAndSize(NULL, len); + if (key == NULL) + return NULL; + + val = PyBytes_FromStringAndSize(NULL, len); + if (val == NULL) + return NULL; + + ckey = PyBytes_AS_STRING(key); + cval = PyBytes_AS_STRING(val); + + i = 0; + k = 0; + v = 0; + while (i < len) { + if (cpair[i] != '=') { + ckey[k] = cpair[i]; + k++; + i++; + } + else { + i++; /* skip '=' */ + while (i < len) { + cval[v] = cpair[i]; + v++; + i++; + } + } + } + + ckey[k] = '\0'; + cval[v] = '\0'; + + if (keep_blank_values || (v > 0)) { + + ap_unescape_url(ckey); + ap_unescape_url(cval); + + _PyBytes_Resize(&key, strlen(ckey)); + _PyBytes_Resize(&val, strlen(cval)); + + if (key && val) { + + ckey = PyBytes_AS_STRING(key); + cval = PyBytes_AS_STRING(val); + + if (unicode) { + PyObject *list, *ukey, *uval; + ukey = PyUnicode_DecodeLatin1(ckey, strlen(ckey), NULL); + uval = PyUnicode_DecodeLatin1(ckey, strlen(cval), NULL); + list = PyDict_GetItem(dict, ukey); + if (list) { + PyList_Append(list, uval); + Py_DECREF(uval); + } else { + list = Py_BuildValue("[O]", uval); + PyDict_SetItem(dict, ukey, list); + Py_DECREF(ukey); + Py_DECREF(list); + } + } else { + PyObject *list; + list = PyDict_GetItem(dict, key); + if (list) + PyList_Append(list, val); + else { + list = Py_BuildValue("[O]", val); + PyDict_SetItem(dict, key, list); + Py_DECREF(list); + } + } + } + } + + Py_XDECREF(key); + Py_XDECREF(val); + + n++; + } + + Py_DECREF(pairs); + return dict; +} + +/** + ** parse_qsl + ** + * This is a C version of cgi.parse_qsl + */ + +static PyObject *parse_qsl(PyObject *self, PyObject *args) +{ + + PyObject *pairs; + int i, len; + PyObject *qso; + char unicode = 0; + char *qs = NULL; + int 
keep_blank_values = 0; + int strict_parsing = 0; /* XXX not implemented */ + + if (! PyArg_ParseTuple(args, "O|ii", &qso, &keep_blank_values, + &strict_parsing)) + return NULL; /* error */ + + if (PyUnicode_Check(qso)) + unicode = 1; + + MP_ANYSTR_AS_STR(qs, qso, 1); + if (!qs) { + Py_DECREF(qso); /* MP_ANYSTR_AS_STR */ + return NULL; + } + + /* split query string by '&' and ';' into a list of pairs */ + pairs = PyList_New(0); + if (pairs == NULL) { + Py_DECREF(qso); /* MP_ANYSTR_AS_STR */ + return NULL; + } + + i = 0; + len = strlen(qs); + + while (i < len) { + + PyObject *pair, *key, *val; + char *cpair, *ckey, *cval; + int plen, j, p, k, v; + + pair = PyBytes_FromStringAndSize(NULL, len); + if (pair == NULL) + return NULL; + + /* split by '&' or ';' */ + cpair = PyBytes_AS_STRING(pair); + j = 0; + while ((qs[i] != '&') && (qs[i] != ';') && (i < len)) { + /* replace '+' with ' ' */ + cpair[j] = (qs[i] == '+') ? ' ' : qs[i]; + i++; + j++; + } + + if (j == 0) { + Py_XDECREF(pair); + i++; + continue; + } + + cpair[j] = '\0'; + _PyBytes_Resize(&pair, j); + cpair = PyBytes_AS_STRING(pair); + + /* split the "abc=def" pair */ + plen = strlen(cpair); + + key = PyBytes_FromStringAndSize(NULL, plen); + if (key == NULL) { + Py_DECREF(qso); /* MP_ANYSTR_AS_STR */ + return NULL; + } + + val = PyBytes_FromStringAndSize(NULL, plen); + if (val == NULL) { + Py_DECREF(qso); /* MP_ANYSTR_AS_STR */ + return NULL; + } + + ckey = PyBytes_AS_STRING(key); + cval = PyBytes_AS_STRING(val); + + p = 0; + k = 0; + v = 0; + while (p < plen) { + if (cpair[p] != '=') { + ckey[k] = cpair[p]; + k++; + p++; + } + else { + p++; /* skip '=' */ + while (p < plen) { + cval[v] = cpair[p]; + v++; + p++; + } + } + } + ckey[k] = '\0'; + cval[v] = '\0'; + + if (keep_blank_values || (v > 0)) { + + ap_unescape_url(ckey); + ap_unescape_url(cval); + + _PyBytes_Resize(&key, strlen(ckey)); + _PyBytes_Resize(&val, strlen(cval)); + + if (key && val) { + PyObject *listitem = NULL; + if (unicode) { + PyObject *ukey, 
*uval; + ukey = PyUnicode_DecodeLatin1(ckey, strlen(ckey), NULL); + uval = PyUnicode_DecodeLatin1(cval, strlen(cval), NULL); + listitem = Py_BuildValue("(O,O)", ukey, uval); + Py_DECREF(ukey); + Py_DECREF(uval); + } else + listitem = Py_BuildValue("(O,O)", key, val); + if(listitem) { + PyList_Append(pairs, listitem); + Py_DECREF(listitem); + } + } + + } + Py_XDECREF(pair); + Py_XDECREF(key); + Py_XDECREF(val); + i++; + } + + Py_DECREF(qso); + return pairs; +} + +/** + ** config_tree + ** + * Returns a copy of the config tree + */ + +static PyObject *config_tree(void) +{ + return cfgtree_walk(ap_conftree); +} + +/** + ** server_root + ** + * Returns ServerRoot + */ + +static PyObject *server_root(void) +{ + return MpBytesOrUnicode_FromString(ap_server_root); +} + +/** + ** _global_lock + ** + * Lock one of our global_mutexes + */ + +static PyObject *_global_lock(PyObject *self, PyObject *args) +{ + + PyObject *server; + PyObject *key; + server_rec *s; + py_global_config *glb; + int index = -1; + apr_status_t rv; + + if (! PyArg_ParseTuple(args, "OO|i", &server, &key, &index)) + return NULL; + + if (! 
MpServer_Check(server)) { + PyErr_SetString(PyExc_TypeError, + "First argument must be a server object"); + return NULL; + } + + s = ((serverobject *)server)->server; + + apr_pool_userdata_get((void **)&glb, MP_CONFIG_KEY, + s->process->pool); + + if ((index >= (glb->nlocks)) || (index < -1)) { + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, + "Index %d is out of range for number of global mutex locks", index); + PyErr_SetString(PyExc_ValueError, + "Lock index is out of range for number of global mutex locks"); + return NULL; + } + + if (index == -1) { + + int hash = PyObject_Hash(key); + if (hash == -1) { + return NULL; + } + else { + hash = abs(hash); + } + + /* note that this will never result in 0, + * which is reserved for things like dbm + * locking (see Session.py) + */ + + index = (hash % (glb->nlocks-1)+1); + } + + /* ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, */ + /* "_global_lock at index %d from pid %d", index, getpid()); */ + Py_BEGIN_ALLOW_THREADS + rv = apr_global_mutex_lock(glb->g_locks[index]); + Py_END_ALLOW_THREADS + if (rv != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_WARNING, rv, s, + "Failed to acquire global mutex lock at index %d", index); + PyErr_SetString(PyExc_ValueError, + "Failed to acquire global mutex lock"); + return NULL; + } + /* ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, */ + /* "_global_lock DONE at index %d from pid %d", index, getpid()); */ + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** _global_trylock + ** + * Try to lock one of our global_mutexes + */ + +static PyObject *_global_trylock(PyObject *self, PyObject *args) +{ + + PyObject *server; + PyObject *key; + server_rec *s; + py_global_config *glb; + int index = -1; + apr_status_t rv; + + if (! PyArg_ParseTuple(args, "OO|i", &server, &key, &index)) + return NULL; + + if (! 
MpServer_Check(server)) { + PyErr_SetString(PyExc_TypeError, + "First argument must be a server object"); + return NULL; + } + + s = ((serverobject *)server)->server; + + apr_pool_userdata_get((void **)&glb, MP_CONFIG_KEY, + s->process->pool); + + if ((index >= (glb->nlocks)) || (index < -1)) { + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, + "Index %d is out of range for number of global mutex locks", index); + PyErr_SetString(PyExc_ValueError, + "Lock index is out of range for number of global mutex locks"); + return NULL; + } + + if (index == -1) { + + int hash = PyObject_Hash(key); + if (hash == -1) { + return NULL; + } + else { + hash = abs(hash); + } + + /* note that this will never result in 0, + * which is reserved for things like dbm + * locking (see Session.py) + */ + + index = (hash % (glb->nlocks-1)+1); + } + + /* + * ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, + * "_global_trylock at index %d from pid %d", index, getpid()); + */ + Py_BEGIN_ALLOW_THREADS + rv = apr_global_mutex_trylock(glb->g_locks[index]); + Py_END_ALLOW_THREADS + + if (rv == APR_SUCCESS) { + /* + * ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, + * "_global_trylock DONE at index %d from pid %d", index, getpid()); + */ + Py_INCREF(Py_True); + return Py_True; + } + else if(APR_STATUS_IS_EBUSY(rv)) { + /* + * ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, + * "_global_trylock BUSY at index %d from pid %d", index, getpid()); + */ + Py_INCREF(Py_False); + return Py_False; + } + else { + ap_log_error(APLOG_MARK, APLOG_WARNING, rv, s, + "Failed to acquire global mutex lock at index %d", index); + PyErr_SetString(PyExc_ValueError, + "Failed to acquire global mutex lock"); + return NULL; + } +} + +/** + ** _global_unlock + ** + * Unlock one of our global_mutexes + */ + +static PyObject *_global_unlock(PyObject *self, PyObject *args) +{ + + PyObject *server; + PyObject *key; + server_rec *s; + py_global_config *glb; + int index = -1; + apr_status_t rv; + + if (! 
PyArg_ParseTuple(args, "OO|i", &server, &key, &index)) + return NULL; + + if (! MpServer_Check(server)) { + PyErr_SetString(PyExc_TypeError, + "First argument must be a server object"); + return NULL; + } + + s = ((serverobject *)server)->server; + + apr_pool_userdata_get((void **)&glb, MP_CONFIG_KEY, + s->process->pool); + + if ((index >= (glb->nlocks)) || (index < -1)) { + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, + "Index %d is out of range for number of global mutex locks", index); + PyErr_SetString(PyExc_ValueError, + "Lock index is out of range for number of global mutex locks"); + return NULL; + } + + if (index == -1) { + + int hash = PyObject_Hash(key); + if (hash == -1) { + return NULL; + } + else { + hash = abs(hash); + } + + /* note that this will never result in 0, + * which is reserved for things like dbm + * locking (see Session.py) + */ + + index = (hash % (glb->nlocks-1)+1); + } + +/* ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, */ +/* "_global_unlock at index %d from pid %d", index, getpid()); */ + if ((rv = apr_global_mutex_unlock(glb->g_locks[index])) != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_WARNING, rv, s, + "Failed to release global mutex lock at index %d", index); + PyErr_SetString(PyExc_ValueError, + "Failed to release global mutex lock"); + return NULL; + } + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** mpm_query + ** + * ap_mpm_query interface + */ + +static PyObject *mpm_query(PyObject *self, PyObject *code) +{ + int result; + +#if PY_MAJOR_VERSION < 3 + if (! PyInt_Check(code)) { + PyErr_SetString(PyExc_TypeError, + "The argument must be an integer"); + return NULL; + } + ap_mpm_query(PyInt_AsLong(code), &result); + return PyInt_FromLong(result); +#else + if (! 
PyLong_Check(code)) { + PyErr_SetString(PyExc_TypeError, + "The argument must be an integer"); + return NULL; + } + ap_mpm_query(PyLong_AsLong(code), &result); + return PyLong_FromLong(result); +#endif +} + +/** + ** register_cleanup(interpreter, server, handler, data) + ** + * more low level version of request.register_cleanup where it is + * necessary to specify the actual interpreter name. the server pool + * is used. the server pool gets destroyed before the child dies or + * when the whole process dies in multithreaded situations. + */ + +static PyObject *register_cleanup(PyObject *self, PyObject *args) +{ + + cleanup_info *ci; + char *interpreter = NULL; + serverobject *server = NULL; + PyObject *handler = NULL; + PyObject *data = NULL; + + if (! PyArg_ParseTuple(args, "sOO|O", &interpreter, &server, &handler, &data)) + return NULL; + + if (! MpServer_Check(server)) { + PyErr_SetString(PyExc_ValueError, + "second argument must be a server object"); + return NULL; + } + else if(!PyCallable_Check(handler)) { + PyErr_SetString(PyExc_ValueError, + "third argument must be a callable object"); + return NULL; + } + + ci = (cleanup_info *)malloc(sizeof(cleanup_info)); + ci->request_rec = NULL; + ci->server_rec = server->server; + Py_INCREF(handler); + ci->handler = handler; + ci->interpreter = strdup(interpreter); + if (data) { + Py_INCREF(data); + ci->data = data; + } + else { + Py_INCREF(Py_None); + ci->data = Py_None; + } + + apr_pool_cleanup_register(child_init_pool, ci, python_cleanup, + apr_pool_cleanup_null); + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** exists_config_define(name) + ** + * Check for a definition from the server command line. + */ + +static PyObject *exists_config_define(PyObject *self, PyObject *args) +{ + + char *name = NULL; + + if (! 
PyArg_ParseTuple(args, "s", &name)) + return NULL; + + if(ap_exists_config_define(name)) { + Py_INCREF(Py_True); + return Py_True; + } + else { + Py_INCREF(Py_False); + return Py_False; + } +} + +/** + ** mp_stat(fname, wanted) + ** + * Wrapper for apr_stat(). + */ + +static PyObject *mp_stat(PyObject *self, PyObject *args) +{ + char *fname = NULL; + apr_int32_t wanted = 0; + finfoobject* finfo; + apr_status_t result; + + if (! PyArg_ParseTuple(args, "si", &fname, &wanted)) + return NULL; + + finfo = (finfoobject *)MpFinfo_New(); + + fname = apr_pstrdup(finfo->pool, fname); + + result = apr_stat(finfo->finfo, fname, wanted, finfo->pool); + + if (result == APR_INCOMPLETE || result == APR_SUCCESS) + return (PyObject *)finfo; + + if (result == APR_ENOENT) + return (PyObject *)finfo; + + Py_DECREF(finfo); + + PyErr_SetObject(PyExc_OSError, + Py_BuildValue("is", result, "apr_stat() failed")); + + return NULL; +} + +PyObject *get_ServerReturn() +{ + return Mp_ServerReturn; +} + +/* methods of _apache */ +static PyMethodDef _apache_module_methods[] = { + {"config_tree", (PyCFunction)config_tree, METH_NOARGS}, + {"log_error", (PyCFunction)mp_log_error, METH_VARARGS}, + {"mpm_query", (PyCFunction)mpm_query, METH_O}, + {"parse_qs", (PyCFunction)parse_qs, METH_VARARGS}, + {"parse_qsl", (PyCFunction)parse_qsl, METH_VARARGS}, + {"server_root", (PyCFunction)server_root, METH_NOARGS}, + {"register_cleanup", (PyCFunction)register_cleanup, METH_VARARGS}, + {"exists_config_define", (PyCFunction)exists_config_define, METH_VARARGS}, + {"stat", (PyCFunction)mp_stat, METH_VARARGS}, + {"_global_lock", (PyCFunction)_global_lock, METH_VARARGS}, + {"_global_trylock", (PyCFunction)_global_trylock, METH_VARARGS}, + {"_global_unlock", (PyCFunction)_global_unlock, METH_VARARGS}, + {NULL, NULL} /* sentinel */ +}; + +/* Module initialization */ + +#if PY_MAJOR_VERSION >= 3 + +static struct PyModuleDef _apache_moduledef = { + PyModuleDef_HEAD_INIT, + "_apache", /* m_name */ + NULL, /* m_doc */ + 
-1, /* m_size */ + _apache_module_methods, /* m_methods */ + NULL, + NULL, + NULL, + NULL, +}; + +#endif + +PyObject *_apache_module_init() +{ + PyObject *m, *d, *o; + + PyType_Ready(&MpTable_Type); + PyType_Ready(&MpTableIter_Type); + PyType_Ready(&MpServer_Type); + PyType_Ready(&MpConn_Type); + PyType_Ready(&MpRequest_Type); + PyType_Ready(&MpFilter_Type); + PyType_Ready(&MpHList_Type); + +#if PY_MAJOR_VERSION < 3 + m = Py_InitModule("_apache", _apache_module_methods); +#else + m = PyModule_Create(&_apache_moduledef); + PyObject *name = PyUnicode_FromString("_apache"); + _PyImport_FixupExtensionObject(m, name, name); +#endif + d = PyModule_GetDict(m); + Mp_ServerReturn = PyErr_NewException("_apache.SERVER_RETURN", NULL, NULL); + if (Mp_ServerReturn == NULL) + return NULL; + PyDict_SetItemString(d, "SERVER_RETURN", Mp_ServerReturn); + + PyDict_SetItemString(d, "table", (PyObject *)&MpTable_Type); + + o = PyLong_FromLong(AP_CONN_UNKNOWN); + PyDict_SetItemString(d, "AP_CONN_UNKNOWN", o); + Py_DECREF(o); + o = PyLong_FromLong(AP_CONN_CLOSE); + PyDict_SetItemString(d, "AP_CONN_CLOSE", o); + Py_DECREF(o); + o = PyLong_FromLong(AP_CONN_KEEPALIVE); + PyDict_SetItemString(d, "AP_CONN_KEEPALIVE", o); + Py_DECREF(o); + + o = PyLong_FromLong(APR_NOFILE); + PyDict_SetItemString(d, "APR_NOFILE", o); + Py_DECREF(o); + o = PyLong_FromLong(APR_REG); + PyDict_SetItemString(d, "APR_REG", o); + Py_DECREF(o); + o = PyLong_FromLong(APR_DIR); + PyDict_SetItemString(d, "APR_DIR", o); + Py_DECREF(o); + o = PyLong_FromLong(APR_CHR); + PyDict_SetItemString(d, "APR_CHR", o); + Py_DECREF(o); + o = PyLong_FromLong(APR_BLK); + PyDict_SetItemString(d, "APR_BLK", o); + Py_DECREF(o); + o = PyLong_FromLong(APR_PIPE); + PyDict_SetItemString(d, "APR_PIPE", o); + Py_DECREF(o); + o = PyLong_FromLong(APR_LNK); + PyDict_SetItemString(d, "APR_LNK", o); + Py_DECREF(o); + o = PyLong_FromLong(APR_SOCK); + PyDict_SetItemString(d, "APR_SOCK", o); + Py_DECREF(o); + o = PyLong_FromLong(APR_UNKFILE); + 
PyDict_SetItemString(d, "APR_UNKFILE", o); + Py_DECREF(o); + + o = PyLong_FromLong(MODULE_MAGIC_NUMBER_MAJOR); + PyDict_SetItemString(d, "MODULE_MAGIC_NUMBER_MAJOR", o); + Py_DECREF(o); + o = PyLong_FromLong(MODULE_MAGIC_NUMBER_MINOR); + PyDict_SetItemString(d, "MODULE_MAGIC_NUMBER_MINOR", o); + Py_DECREF(o); + + return m; +} + +#if PY_MAJOR_VERSION < 3 + +PyMODINIT_FUNC init_apache(void) { + _apache_module_init(); +} + +#else + +PyMODINIT_FUNC PyInit_apache(void) { + return _apache_module_init(); +} + +#endif diff --git a/src/_pspmodule.c b/src/_pspmodule.c new file mode 100644 index 0000000..00213cc --- /dev/null +++ b/src/_pspmodule.c @@ -0,0 +1,225 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * This file originally written by Stering Hughes + * + * + * See accompanying documentation and source code comments for + * details. 
+ * + */ + +#include "psp_flex.h" +#include "psp_parser.h" +#include "psp_string.h" +#include "_pspmodule.h" +#include "Python.h" + +/* calm down compile warning from psp_flex.h*/ +static int yy_init_globals (yyscan_t yyscanner ) {return 0;}; + +static psp_parser_t *psp_parser_init(void) +{ + psp_parser_t *parser; + + parser = (psp_parser_t *) malloc(sizeof(*parser)); + + memset(&parser->pycode, 0, sizeof(psp_string)); + memset(&parser->whitespace, 0, sizeof(psp_string)); + parser->dir = NULL; + parser->is_psp_echo = 0; + parser->after_colon = 0; + parser->seen_newline = 0; + + return parser; +} + +static void psp_parser_cleanup(psp_parser_t *parser) +{ + if (parser->pycode.allocated) { + free(parser->pycode.blob); + } + + if (parser->whitespace.allocated) { + free(parser->whitespace.blob); + } + + free(parser); +} + +static PyObject * _psp_module_parse(PyObject *self, PyObject *argv) +{ + PyObject *code; + char *filename; + char *dir = NULL; + char *path; + psp_parser_t *parser; + yyscan_t scanner; + FILE *f; + + if (!PyArg_ParseTuple(argv, "s|s", &filename, &dir)) { + return NULL; + } + + if (dir) { + path = malloc(strlen(filename)+strlen(dir)+1); + if (!path) + return PyErr_NoMemory(); + strcpy(path, dir); + strcat(path, filename); + } + else { + path = filename; + } + + Py_BEGIN_ALLOW_THREADS + f = fopen(path, "rb"); + Py_END_ALLOW_THREADS + + if (f == NULL) { + PyErr_SetFromErrnoWithFilename(PyExc_IOError, path); + if (dir) free(path); + return NULL; + } + if (dir) free(path); + + parser = psp_parser_init(); + if (dir) + parser->dir = dir; + + yylex_init(&scanner); + yyset_in(f, scanner); + yyset_extra(parser, scanner); + yylex(scanner); + yylex_destroy(scanner); + + fclose(f); + psp_string_0(&parser->pycode); + + if (PyErr_Occurred()) { + psp_parser_cleanup(parser); + return NULL; + } + + if (parser->pycode.blob) { + code = MpBytesOrUnicode_FromString(parser->pycode.blob); + } + else { + code = MpBytesOrUnicode_FromString(""); + } + + 
psp_parser_cleanup(parser); + + return code; +} + +static PyObject * _psp_module_parsestring(PyObject *self, PyObject *argv) +{ + PyObject *code; + PyObject *str; + PyObject *latin = NULL; + char *c_str = NULL; + yyscan_t scanner; + psp_parser_t *parser; + YY_BUFFER_STATE bs; + + if (!PyArg_ParseTuple(argv, "S", &str)) { + return NULL; + } + + Py_BEGIN_ALLOW_THREADS + parser = psp_parser_init(); + yylex_init(&scanner); + yyset_extra(parser, scanner); + + if (PyUnicode_Check(str)) { + latin = PyUnicode_AsLatin1String(str); + if (latin) + c_str = PyBytes_AsString(latin); + } else if (PyBytes_Check(str)) + c_str = PyBytes_AsString(str); + + if (!c_str) c_str = "UNICODE ERROR"; + + bs = yy_scan_string(c_str, scanner); + yylex(scanner); + + Py_XDECREF(latin); + + /* yy_delete_buffer(bs, scanner); */ + yylex_destroy(scanner); + + psp_string_0(&parser->pycode); + Py_END_ALLOW_THREADS + + if (parser->pycode.blob) { + code = MpBytesOrUnicode_FromString(parser->pycode.blob); + } + else { + code = MpBytesOrUnicode_FromString(""); + } + + psp_parser_cleanup(parser); + + return code; +} + +static PyMethodDef _psp_module_methods[] = { + {"parse", (PyCFunction) _psp_module_parse, METH_VARARGS}, + {"parsestring", (PyCFunction) _psp_module_parsestring, METH_VARARGS}, + {NULL, NULL} +}; + +#if PY_MAJOR_VERSION >= 3 + +static struct PyModuleDef _psp_moduledef = { + PyModuleDef_HEAD_INIT, + "_psp", /* m_name */ + NULL, /* m_doc */ + -1, /* m_size */ + _psp_module_methods, /* m_methods */ + NULL, + NULL, + NULL, + NULL, +}; + +#endif + +PyObject * _init_psp(void) +{ + PyObject *m; +#if PY_MAJOR_VERSION < 3 + m = Py_InitModule("_psp", _psp_module_methods); +#else + m = PyModule_Create(&_psp_moduledef); +#endif + return m; +} + +#if PY_MAJOR_VERSION < 3 + +PyMODINIT_FUNC init_psp(void) { + _init_psp(); +} + +#else + +PyMODINIT_FUNC PyInit__psp(void) { + return _init_psp(); +} + +#endif diff --git a/src/connobject.c b/src/connobject.c new file mode 100644 index 0000000..6c62bad --- 
/dev/null +++ b/src/connobject.c @@ -0,0 +1,465 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. + * + * + * connobject.c + * + * + */ + +/* + * This is a mapping of a Python object to an Apache conn_rec. + * + */ + +#include "mod_python.h" + +/** + ** MpConn_FromConn + ** + * This routine creates a Python connobject given an Apache + * conn_rec pointer. + * + */ + +PyObject * MpConn_FromConn(conn_rec *c) +{ + connobject *result; + MpConn_Type.ob_type = &PyType_Type; + result = PyObject_New(connobject, &MpConn_Type); + if (! result) + return PyErr_NoMemory(); + + result->conn = c; + result->base_server = NULL; + result->notes = MpTable_FromTable(c->notes); + result->hlo = NULL; + + return (PyObject *)result; +} + +/** + ** conn.log_error(conn self, string message, int level) + ** + * calls ap_log_cerror + */ + +static PyObject * conn_log_error(connobject *self, PyObject *args) +{ + int level = 0; + char *message = NULL; + + if (! PyArg_ParseTuple(args, "z|i", &message, &level)) + return NULL; /* error */ + + if (message) { + + if (! 
level) + level = APLOG_ERR; + + Py_BEGIN_ALLOW_THREADS +#if AP_MODULE_MAGIC_AT_LEAST(20020903,10) + ap_log_cerror(APLOG_MARK, level, 0, self->conn, "%s", message); +#else + ap_log_error(APLOG_MARK, level, 0, self->conn->base_server, "%s", message); +#endif + Py_END_ALLOW_THREADS + } + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** conn.read(conn self, int bytes) + ** + */ + +static PyObject * _conn_read(conn_rec *c, ap_input_mode_t mode, long len) +{ + + apr_bucket *b; + apr_bucket_brigade *bb; + apr_status_t rc; + long bytes_read; + PyObject *result; + char *buffer; + long bufsize; + + bb = apr_brigade_create(c->pool, c->bucket_alloc); + + bufsize = len == 0 ? HUGE_STRING_LEN : len; + + while (APR_BRIGADE_EMPTY(bb)) { + Py_BEGIN_ALLOW_THREADS; + rc = ap_get_brigade(c->input_filters, bb, mode, APR_BLOCK_READ, bufsize); + Py_END_ALLOW_THREADS; + + if (rc != APR_SUCCESS) { + PyErr_SetString(PyExc_IOError, "Connection read error"); + return NULL; + } + } + + /* + * loop through the brigade reading buckets into the string + */ + + b = APR_BRIGADE_FIRST(bb); + + if (APR_BUCKET_IS_EOS(b)) { + apr_bucket_delete(b); + Py_INCREF(Py_None); + return Py_None; + } + + result = PyBytes_FromStringAndSize(NULL, bufsize); + + /* possibly no more memory */ + if (result == NULL) + return PyErr_NoMemory(); + + buffer = PyBytes_AS_STRING((PyBytesObject *) result); + + bytes_read = 0; + + while ((bytes_read < len || len == 0) && + !(b == APR_BRIGADE_SENTINEL(bb) || + APR_BUCKET_IS_EOS(b) || APR_BUCKET_IS_FLUSH(b))) { + + const char *data; + apr_size_t size; + apr_bucket *old; + + if (apr_bucket_read(b, &data, &size, APR_BLOCK_READ) != APR_SUCCESS) { + PyErr_SetString(PyExc_IOError, "Connection read error"); + return NULL; + } + + if (bytes_read + size > bufsize) { + apr_bucket_split(b, bufsize - bytes_read); + size = bufsize - bytes_read; + /* now the bucket is the exact size we need */ + } + + memcpy(buffer, data, size); + buffer += size; + bytes_read += size; + + /* time to 
grow destination string? */ + if (len == 0 && bytes_read == bufsize) { + + _PyBytes_Resize(&result, bufsize + HUGE_STRING_LEN); + buffer = PyBytes_AS_STRING((PyBytesObject *) result); + buffer += bufsize; + bufsize += HUGE_STRING_LEN; + } + + + if (mode == AP_MODE_GETLINE || len == 0) { + apr_bucket_delete(b); + break; + } + + old = b; + b = APR_BUCKET_NEXT(b); + apr_bucket_delete(old); + } + + /* resize if necessary */ + if (bytes_read < len || len == 0) + if(_PyBytes_Resize(&result, bytes_read)) + return NULL; + + return result; +} + +/** + ** conn.read(conn self, int bytes) + ** + */ + +static PyObject * conn_read(connobject *self, PyObject *args) +{ + + long len = 0; + + if (! PyArg_ParseTuple(args, "|l", &len)) + return NULL; + + if (len == -1) + return _conn_read(self->conn, AP_MODE_EXHAUSTIVE, 0); + else + return _conn_read(self->conn, AP_MODE_READBYTES, len); +} + +/** + ** conn.readline(conn self, int bytes) + ** + */ + +static PyObject * conn_readline(connobject *self, PyObject *args) +{ + + long len = 0; + + if (! PyArg_ParseTuple(args, "|l", &len)) + return NULL; + + return _conn_read(self->conn, AP_MODE_GETLINE, len); +} + +/** + ** conn.write(conn self, int bytes) + ** + */ + +static PyObject * conn_write(connobject *self, PyObject *args) +{ + char *buff; + int len; + apr_bucket_brigade *bb; + apr_bucket *b; + PyObject *s; + conn_rec *c = self->conn; + + if (! 
PyArg_ParseTuple(args, "s#", &buff, &len)) + return NULL; /* bad args */ + + if (len) { + bb = apr_brigade_create(c->pool, c->bucket_alloc); + + b = apr_bucket_pool_create(buff, len, c->pool, c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, b); + + /* Make sure the data is flushed to the client */ + b = apr_bucket_flush_create(c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(bb, b); + + ap_pass_brigade(c->output_filters, bb); + } + + Py_INCREF(Py_None); + return Py_None; +} + +static PyMethodDef connobjectmethods[] = { + {"log_error", (PyCFunction) conn_log_error, METH_VARARGS}, + {"read", (PyCFunction) conn_read, METH_VARARGS}, + {"readline", (PyCFunction) conn_readline, METH_VARARGS}, + {"write", (PyCFunction) conn_write, METH_VARARGS}, + { NULL, NULL } /* sentinel */ +}; + +#define OFF(x) offsetof(conn_rec, x) + +static PyMemberDef conn_memberlist[] = { + {"base_server", T_OBJECT, 0, READONLY}, + /* XXX vhost_lookup_data? */ + /* XXX client_socket? */ + {"local_addr", T_OBJECT, 0, READONLY}, +#if AP_MODULE_MAGIC_AT_LEAST(20111130,0) + {"client_addr", T_OBJECT, 0, READONLY}, + {"client_ip", T_STRING, OFF(client_ip), READONLY}, + {"remote_ip", T_STRING, OFF(client_ip), READONLY}, /* bw compat */ +#else + {"remote_addr", T_OBJECT, 0, READONLY}, + {"remote_ip", T_STRING, OFF(remote_ip), READONLY}, +#endif + {"remote_host", T_STRING, OFF(remote_host), READONLY}, + {"remote_logname", T_STRING, OFF(remote_logname), READONLY}, + {"aborted", T_INT, 0, READONLY}, + {"keepalive", T_INT, 0, READONLY}, + {"double_reverse", T_INT, 0, READONLY}, + {"keepalives", T_INT, OFF(keepalives), READONLY}, + {"local_addr", T_OBJECT, 0, READONLY}, + {"local_ip", T_STRING, OFF(local_ip), READONLY}, + {"local_host", T_STRING, OFF(local_host), READONLY}, + {"id", T_LONG, OFF(id), READONLY}, + /* XXX conn_config? */ + {"notes", T_OBJECT, 0, READONLY}, + /* XXX filters ? 
*/ + /* XXX document remain */ + /*{"remain", T_LONG, OFF(remain), READONLY},*/ + {NULL} /* Sentinel */ +}; + +/** + ** conn_dealloc + ** + * + */ + +static void conn_dealloc(connobject *self) +{ + Py_XDECREF(self->base_server); + Py_XDECREF(self->notes); + Py_XDECREF(self->hlo); + PyObject_Del(self); +} + + +/** + ** conn_getattr + ** + * Get conn object attributes + * + */ + +static PyObject * conn_getattr(connobject *self, char *name) +{ + + PyObject *res; + PyMemberDef *md; + PyMethodDef *ml = connobjectmethods; + for (; ml->ml_name != NULL; ml++) { + if (name[0] == ml->ml_name[0] && + strcmp(name+1, ml->ml_name+1) == 0) + return PyCFunction_New(ml, (PyObject*)self); + } + + PyErr_Clear(); + + if (strcmp(name, "base_server") == 0) { + + /* base_server serverobject is created as needed */ + if (self->base_server == NULL) { + if (self->conn->base_server == NULL) { + Py_INCREF(Py_None); + return Py_None; + } + else { + self->base_server = MpServer_FromServer(self->conn->base_server); + Py_INCREF(self->base_server); + return self->base_server; + } + } + else { + Py_INCREF(self->base_server); + return self->base_server; + } + } + else if (strcmp(name, "aborted") == 0) { + return PyLong_FromLong(self->conn->aborted); + } + else if (strcmp(name, "keepalive") == 0) { + return PyLong_FromLong(self->conn->keepalive); + } + else if (strcmp(name, "double_reverse") == 0) { + return PyLong_FromLong(self->conn->double_reverse); + } + else if (strcmp(name, "local_addr") == 0) { + return makesockaddr(self->conn->local_addr); + } +#if AP_MODULE_MAGIC_AT_LEAST(20111130,0) + else if (strcmp(name, "client_addr") == 0) { + return makesockaddr(self->conn->client_addr); + } + else if (strcmp(name, "remote_addr") == 0) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, self->conn, "%s", + "mod_python: conn.remote_addr deprecated in 2.4, " + "use req.useragent_addr or conn.client_addr"); + return makesockaddr(self->conn->client_addr); +#else + else if (strcmp(name, "remote_addr") == 0) { + 
return makesockaddr(self->conn->remote_addr); +#endif + } + else if (strcmp(name, "notes") == 0) { + Py_INCREF(self->notes); + return (PyObject *) self->notes; + } + else if (strcmp(name, "hlist") == 0) { + Py_INCREF(self->hlo); + return (PyObject *)self->hlo; + } + else if (strcmp(name, "_conn_rec") == 0) { +#if PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION < 7 + return PyCObject_FromVoidPtr(self->conn, 0); +#else + return PyCapsule_New((void *)self->conn, NULL, NULL); +#endif + } + else { + +#if AP_MODULE_MAGIC_AT_LEAST(20111130,0) + if (strcmp(name, "remote_ip") == 0) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, self->conn, "%s", + "mod_python: conn.remote_ip deprecated in 2.4, " + "use req.useragent_ip or conn.client_ip"); + } +#endif + md = find_memberdef(conn_memberlist, name); + if (!md) { + PyErr_SetString(PyExc_AttributeError, name); + return NULL; + } + return PyMember_GetOne((char*)self->conn, md); + } +} + +/** + ** conn_setattr + ** + * Set connection object attribute + */ + +static int conn_setattr(connobject *self, char* name, PyObject* value) +{ + + if (value == NULL) { + PyErr_SetString(PyExc_AttributeError, + "can't delete connection attributes"); + return -1; + } + else if (strcmp(name, "keepalive") == 0) { + if (! 
PyLong_Check(value)) { + PyErr_SetString(PyExc_TypeError, "keepalive must be a integer"); + return -1; + } + self->conn->keepalive = PyLong_AsLong(value); + return 0; + } + else { + PyMemberDef *md = find_memberdef(conn_memberlist, name); + if (!md) { + PyErr_SetString(PyExc_AttributeError, name); + return -1; + } + return PyMember_SetOne((char*)self->conn, md, value); + } +} + +PyTypeObject MpConn_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "mp_conn", /* tp_name */ + sizeof(connobject), /* tp_basicsize */ + 0, /* tp_itemsize */ + (destructor) conn_dealloc, /* tp_dealloc*/ + 0, /* tp_print*/ + (getattrfunc) conn_getattr, /* tp_getattr*/ + (setattrfunc) conn_setattr, /* tp_setattr*/ + 0, /* tp_compare*/ + 0, /* tp_repr*/ + 0, /* tp_as_number*/ + 0, /* tp_as_sequence*/ + 0, /* tp_as_mapping*/ + 0, /* tp_hash*/ +}; + + diff --git a/src/filterobject.c b/src/filterobject.c new file mode 100644 index 0000000..98256c8 --- /dev/null +++ b/src/filterobject.c @@ -0,0 +1,573 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. + * + * + * filterobject.c + * + * + * See accompanying documentation and source code comments + * for details. 
+ * + */ + +#include "mod_python.h" + +/*** Some explanation of what is going on here: + * + * In Apache terminology, an "input" filter filters data flowing from + * network to application (aka "up"), an "output" filter filters data + * flowing from application to network (aka "down"). + * + * An output filter is invoked as a result of ap_pass_brigade() + * call. It is given a populated brigade, which it then gives in the + * same fashion to the next filter via ap_pass_brigade(). (The filter + * may chose to create a new brigade and pass it instead). + * + * An input filter is invoked as a result of ap_get_brigade() call. It + * is given an empty brigade, which it is expected to populate, which + * may in turn require the filter to invoke the next filter in the + * same fashion (via ap_get_brigade()). + * + * In mod_python Output filters: + * + * filter.read() - copies data from *given* bucket brigade (saved in + * self->bb_in) into a Python string. + * + * filter.write() - copies data from a Python string into a *new* + * bucket brigade (saved in self->bb_out). + * + * filter.close() - appends an EOS and passes the self->bb_out brigade + * to the next filter via ap_pass_brigade() + * + * In mod_python Input filters: + * + * filter.read() - copies data from a *new* and *populated via + * ap_get_brigade* (saved as self->bb_in) into a Python string. + * + * filter.write() - copies data from a Python string into a *given* + * brigade (saved as self->bb_out). + * + * filter.close() - appends an EOS to *given* brigade. + * + */ + +/** + ** MpFilter_FromFilter + ** + * This routine creates a Python filerobject. + * + */ + +PyObject *MpFilter_FromFilter(ap_filter_t *f, apr_bucket_brigade *bb, int is_input, + ap_input_mode_t mode, apr_size_t readbytes, + char * handler, char *dir) +{ + filterobject *result; + MpFilter_Type.ob_type = &PyType_Type; + result = PyObject_New(filterobject, &MpFilter_Type); + if (! 
result) + return PyErr_NoMemory(); + + result->f = f; + result->is_input = is_input; + + result->rc = APR_SUCCESS; + + if (is_input) { + result->bb_in = NULL; + result->bb_out = bb; + result->mode = mode; + result->readbytes = readbytes; + } + else { + result->bb_in = bb; + result->bb_out = NULL; + result->mode = 0; + result->readbytes = 0; + } + + result->closed = 0; + result->softspace = 0; + + result->handler = handler; + result->dir = dir; + + result->request_obj = NULL; + + apr_pool_cleanup_register(f->r->pool, (PyObject *)result, python_decref, + apr_pool_cleanup_null); + + return (PyObject *)result; +} + +/** + ** filter_pass_on + ** + * just passes everything on + */ + +static PyObject *filter_pass_on(filterobject *self) +{ + + Py_BEGIN_ALLOW_THREADS; + + if (self->is_input) + self->rc = ap_get_brigade(self->f->next, self->bb_out, + self->mode, APR_BLOCK_READ, + self->readbytes); + else + self->rc = ap_pass_brigade(self->f->next, self->bb_in); + + Py_END_ALLOW_THREADS; + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** _filter_read + ** + * read from filter - works for both read() and readline() + */ + +static PyObject *_filter_read(filterobject *self, PyObject *args, int readline) +{ + + apr_bucket *b; + long bytes_read; + PyObject *result; + char *buffer; + long bufsize; + int newline = 0; + long len = -1; + conn_rec *c = self->request_obj->request_rec->connection; + + if (! PyArg_ParseTuple(args, "|l", &len)) + return NULL; + + if (self->closed) { + PyErr_SetString(PyExc_ValueError, "I/O operation on closed filter"); + return NULL; + } + + if (self->is_input) { + + /* does the output brigade exist? 
*/ + if (!self->bb_in) { + self->bb_in = apr_brigade_create(self->f->r->pool, + c->bucket_alloc); + } + + Py_BEGIN_ALLOW_THREADS; + self->rc = ap_get_brigade(self->f->next, self->bb_in, self->mode, + APR_BLOCK_READ, self->readbytes); + Py_END_ALLOW_THREADS; + + if (!APR_STATUS_IS_EAGAIN(self->rc) && !(self->rc == APR_SUCCESS)) { + PyErr_SetString(PyExc_IOError, "Input filter read error"); + return NULL; + } + } + + /* + * loop through the brigade reading buckets into the string + */ + + b = APR_BRIGADE_FIRST(self->bb_in); + + if (b == APR_BRIGADE_SENTINEL(self->bb_in)) + return PyBytes_FromString(""); + + /* reached eos ? */ + if (APR_BUCKET_IS_EOS(b)) { + apr_bucket_delete(b); + Py_INCREF(Py_None); + return Py_None; + } + + bufsize = len < 0 ? HUGE_STRING_LEN : len; + result = PyBytes_FromStringAndSize(NULL, bufsize); + + /* possibly no more memory */ + if (result == NULL) + return PyErr_NoMemory(); + + buffer = PyBytes_AS_STRING((PyBytesObject *) result); + + bytes_read = 0; + + while ((bytes_read < len || len == -1) && + !(APR_BUCKET_IS_EOS(b) || APR_BUCKET_IS_FLUSH(b) || + b == APR_BRIGADE_SENTINEL(self->bb_in))) { + + const char *data; + apr_size_t size; + apr_bucket *old; + int i; + + if (apr_bucket_read(b, &data, &size, APR_BLOCK_READ) != APR_SUCCESS) { + PyErr_SetObject(PyExc_IOError, + PyBytes_FromString("Filter read error")); + return NULL; + } + + if (bytes_read + size > bufsize) { + apr_bucket_split(b, bufsize - bytes_read); + size = bufsize - bytes_read; + /* now the bucket is the exact size we need */ + } + + if (readline) { + + /* scan for newline */ + for (i=0; irequest_obj->request_rec->connection; + + if (self->closed) { + PyErr_SetString(PyExc_ValueError, "I/O operation on closed filter"); + return NULL; + } + if (! PyArg_ParseTuple(args, "s#", &buff, &len)) + return NULL; /* bad args */ + + if (len) { + + /* does the output brigade exist? 
*/ + if (!self->bb_out) + self->bb_out = apr_brigade_create(self->f->r->pool, + c->bucket_alloc); + + /* it looks like there is no need to memcpy, an immortal + bucket is fine, since Python won't free that memory before + the write is over */ + b = apr_bucket_immortal_create(buff, len, c->bucket_alloc); + + APR_BRIGADE_INSERT_TAIL(self->bb_out, b); + } + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** filter.flush(filter self) + ** + * Flush output (i.e. pass brigade) + */ + +static PyObject *filter_flush(filterobject *self, PyObject *args) +{ + + conn_rec *c = self->request_obj->request_rec->connection; + + /* does the output brigade exist? */ + if (!self->bb_out) { + self->bb_out = apr_brigade_create(self->f->r->pool, + c->bucket_alloc); + } + + APR_BRIGADE_INSERT_TAIL(self->bb_out, + apr_bucket_flush_create(c->bucket_alloc)); + + if (!self->is_input) { + + Py_BEGIN_ALLOW_THREADS; + self->rc = ap_pass_brigade(self->f->next, self->bb_out); + apr_brigade_destroy(self->bb_out); + Py_END_ALLOW_THREADS; + + if(self->rc != APR_SUCCESS) { + PyErr_SetString(PyExc_IOError, "Flush failed."); + return NULL; + } + } + + Py_INCREF(Py_None); + return Py_None; + +} + +/** + ** filter.close(filter self) + ** + * passes EOS + */ + +static PyObject *filter_close(filterobject *self, PyObject *args) +{ + + conn_rec *c = self->request_obj->request_rec->connection; + + if (! self->closed) { + + /* does the output brigade exist? */ + if (!self->bb_out) { + self->bb_out = apr_brigade_create(self->f->r->pool, + c->bucket_alloc); + } + + APR_BRIGADE_INSERT_TAIL(self->bb_out, + apr_bucket_eos_create(c->bucket_alloc)); + + if (! 
self->is_input) {
            Py_BEGIN_ALLOW_THREADS;
            self->rc = ap_pass_brigade(self->f->next, self->bb_out);
            apr_brigade_destroy(self->bb_out);
            Py_END_ALLOW_THREADS;
            self->bb_out = NULL;
        }

        self->closed = 1;
    }

    Py_INCREF(Py_None);
    return Py_None;

}

/**
 ** filter.disable(filter self)
 **
 *  Sets the transparent flag on, causing the filter_handler to
 *  just pass the data through without invoking Python at all.
 *  This is used during filter error output.
 */

static PyObject *filter_disable(filterobject *self, PyObject *args)
{

    python_filter_ctx *ctx;

    ctx = (python_filter_ctx *) self->f->ctx;
    ctx->transparent = 1;

    Py_INCREF(Py_None);
    return Py_None;

}

/* methods exposed on the mp_filter object */
static PyMethodDef filterobjectmethods[] = {
    {"pass_on",  (PyCFunction) filter_pass_on,  METH_NOARGS},
    {"read",     (PyCFunction) filter_read,     METH_VARARGS},
    {"readline", (PyCFunction) filter_readline, METH_VARARGS},
    {"write",    (PyCFunction) filter_write,    METH_VARARGS},
    {"flush",    (PyCFunction) filter_flush,    METH_VARARGS},
    {"close",    (PyCFunction) filter_close,    METH_VARARGS},
    {"disable",  (PyCFunction) filter_disable,  METH_VARARGS},
    {NULL, NULL}
};

#define OFF(x) offsetof(filterobject, x)

/* member table; note that "name" and "req" are resolved specially in
   filter_getattr below, so the 0 offset for "name" is never used */
static PyMemberDef filter_memberlist[] = {
    {"softspace", T_INT,    OFF(softspace),   },
    {"closed",    T_INT,    OFF(closed),      READONLY},
    {"name",      T_OBJECT, 0,                READONLY},
    {"req",       T_OBJECT, OFF(request_obj), },
    {"is_input",  T_INT,    OFF(is_input),    READONLY},
    {"handler",   T_STRING, OFF(handler),     READONLY},
    {"dir",       T_STRING, OFF(dir),         READONLY},
    {NULL}  /* Sentinel */
};

/**
 ** filter_dealloc
 **
 *  Destructor: drops the reference held on the request object and
 *  frees the filter object itself.
 */

static void filter_dealloc(filterobject *self)
{
    Py_XDECREF(self->request_obj);
    PyObject_Del(self);
}


/**
 ** filter_getattr
 **
 *  Get filter object attributes.  Lookup order: methods first, then
 *  the specially-handled "name" (from f->frec) and "req" attributes,
 *  then the generic member table above.
 */

static PyObject * filter_getattr(filterobject *self, char *name)
{

    PyObject *res;

    PyMethodDef *ml = filterobjectmethods;
    for (; ml->ml_name != NULL; ml++) {
        /* cheap first-character check before the full strcmp */
        if (name[0] == ml->ml_name[0] &&
            strcmp(name+1, ml->ml_name+1) == 0)
            return PyCFunction_New(ml, (PyObject*)self);
    }

    PyErr_Clear();

    if (strcmp(name, "name") == 0) {
        if (! self->f->frec->name) {
            Py_INCREF(Py_None);
            return Py_None;
        }
        else {
            return MpBytesOrUnicode_FromString(self->f->frec->name);
        }
    }
    else if (strcmp(name, "req") == 0) {
        if (! self->request_obj) {
            Py_INCREF(Py_None);
            return Py_None;
        }
        else {
            Py_INCREF(self->request_obj);
            return (PyObject *)self->request_obj;
        }
    }
    else {
        PyMemberDef *md = find_memberdef(filter_memberlist, name);
        if (!md) {
            PyErr_SetString(PyExc_AttributeError, name);
            return NULL;
        }
        return PyMember_GetOne((char*)self, md);
    }
}

/**
 ** filter_setattr
 **
 *  Set filter object attributes via the member table; deleting an
 *  attribute (v == NULL) is rejected.
 */

static int filter_setattr(filterobject *self, char *name, PyObject *v)
{
    PyMemberDef *md;
    if (v == NULL) {
        PyErr_SetString(PyExc_AttributeError,
                        "can't delete filter attributes");
        return -1;
    }
    md = find_memberdef(filter_memberlist, name);
    if (!md) {
        PyErr_SetString(PyExc_AttributeError, name);
        return -1;
    }
    return PyMember_SetOne((char*)self, md, v);
}

PyTypeObject MpFilter_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "mp_filter",                        /* tp_name */
    sizeof(filterobject),               /* tp_basicsize */
    0,                                  /* tp_itemsize */
    (destructor) filter_dealloc,        /* tp_dealloc*/
    0,                                  /* tp_print*/
    (getattrfunc) filter_getattr,       /* tp_getattr*/
    (setattrfunc) filter_setattr,       /* tp_setattr*/
    0,                                  /* tp_compare*/
    0,                                  /* tp_repr*/
    0,                                  /* tp_as_number*/
    0,                                  /* tp_as_sequence*/
    0,                                  /* tp_as_mapping*/
    0,                                  /* tp_hash*/
};

diff --git a/src/finfoobject.c b/src/finfoobject.c
new file mode 100644
index 0000000..0aa9aa3
--- /dev/null
+++ b/src/finfoobject.c
@@ -0,0 +1,394 @@
/*
 * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy
 * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the
"License"); you
 * may not use this file except in compliance with the License.  You
 * may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 *
 * Originally developed by Gregory Trubetskoy.
 *
 *
 * finfoobject.c
 *
 *
 */

#include "mod_python.h"

/**
 ** MpFinfo_FromFinfo
 **
 *  This routine creates a Python finfoobject given an Apache
 *  finfo pointer.  The resulting object does not own the finfo
 *  memory (pool is left NULL), so the pool the finfo was
 *  allocated from must outlive the object.
 */

PyObject * MpFinfo_FromFinfo(apr_finfo_t *f)
{
    finfoobject *result;
    /* point the static type object's ob_type at PyType_Type before
       the first instance is created */
    MpFinfo_Type.ob_type = &PyType_Type;
    result = PyObject_New(finfoobject, &MpFinfo_Type);
    if (! result)
        return PyErr_NoMemory();

    result->finfo = f;
    result->pool = NULL;

    return (PyObject *)result;
}

/**
 ** MpFinfo_New
 **
 *  This returns a new object of built-in type finfo.
 *
 *  NOTE: The apr_finfo_t gets created in its own pool, which lives
 *  through the life of the finfoobject.
 */

PyObject * MpFinfo_New()
{
    finfoobject *f;
    apr_pool_t *p;

    /* XXX need second arg abort function to report mem error */
    apr_pool_create_ex(&p, NULL, NULL, NULL);

    f = (finfoobject *)MpFinfo_FromFinfo(apr_pcalloc(p, sizeof(apr_finfo_t)));

    /* remember the pointer to our own pool; finfo_dealloc destroys it */
    f->pool = p;

    return (PyObject *)f;
}

/**
 ** finfo_dealloc
 **
 *  Frees finfo's memory.  When the object owns its own pool (i.e. it
 *  was made by MpFinfo_New), the pool -- and with it the apr_finfo_t
 *  -- is destroyed too.
 */

static void finfo_dealloc(register finfoobject *self)
{
    if (MpFinfo_Check(self)) {
        if (self->pool)
            apr_pool_destroy(self->pool);
        PyObject_Del(self);
    }
    else
        Py_TYPE(self)->tp_free((PyObject *)self);
}

/**
 ** finfo_getattr
 **
 *  Get finfo object attributes.  Except for "fname" and "filetype",
 *  every attribute returns None when the finfo holds no file
 *  (filetype == APR_NOFILE) or when the matching APR_FINFO_* bit is
 *  not set in finfo->valid.  Unknown names raise AttributeError.
 */

static PyObject * finfo_getattr(finfoobject *self, char *name)
{
    if (strcmp(name, "fname") == 0) {
        if (self->finfo->fname)
            return MpBytesOrUnicode_FromString(self->finfo->fname);
    }
    else if (strcmp(name, "filetype") == 0) {
        return PyLong_FromLong(self->finfo->filetype);
    }
    else if (strcmp(name, "valid") == 0) {
        if (self->finfo->filetype == APR_NOFILE) {
            Py_INCREF(Py_None);
            return Py_None;
        }
        return PyLong_FromLong(self->finfo->valid);
    }
    else if (strcmp(name, "protection") == 0) {
        if (self->finfo->filetype == APR_NOFILE) {
            Py_INCREF(Py_None);
            return Py_None;
        }
        if (self->finfo->valid & APR_FINFO_PROT)
            return PyLong_FromLong(self->finfo->protection);
    }
    else if (strcmp(name, "user") == 0) {
        if (self->finfo->filetype == APR_NOFILE) {
            Py_INCREF(Py_None);
            return Py_None;
        }
        if (self->finfo->valid & APR_FINFO_USER)
            return PyLong_FromLong(self->finfo->user);
    }
    else if (strcmp(name, "group") == 0) {
        if (self->finfo->filetype == APR_NOFILE) {
            Py_INCREF(Py_None);
            return Py_None;
        }
        if (self->finfo->valid & APR_FINFO_GROUP)
            return PyLong_FromLong(self->finfo->group);
    }
    else if (strcmp(name, "inode") == 0) {
        if (self->finfo->filetype == APR_NOFILE) {
            Py_INCREF(Py_None);
            return Py_None;
        }
        if (self->finfo->valid & APR_FINFO_INODE)
            return PyLong_FromLong(self->finfo->inode);
    }
    else if (strcmp(name, "device") == 0) {
        if (self->finfo->filetype == APR_NOFILE) {
            Py_INCREF(Py_None);
            return Py_None;
        }
        if (self->finfo->valid & APR_FINFO_DEV)
            return PyLong_FromLong(self->finfo->device);
    }
    else if (strcmp(name, "nlink") == 0) {
        if (self->finfo->filetype == APR_NOFILE) {
            Py_INCREF(Py_None);
            return Py_None;
        }
        if (self->finfo->valid & APR_FINFO_NLINK)
            return PyLong_FromLong(self->finfo->nlink);
    }
    else if (strcmp(name, "size") == 0) {
        if (self->finfo->filetype == APR_NOFILE) {
            Py_INCREF(Py_None);
            return Py_None;
        }
        if (self->finfo->valid & APR_FINFO_SIZE) {
            /* use the long long path when apr_off_t is that wide,
               so large files don't get truncated */
            if (sizeof(apr_off_t) == sizeof(PY_LONG_LONG)) {
                return PyLong_FromLongLong(self->finfo->size);
            }
            else {
                return PyLong_FromLong(self->finfo->size);
            }
        }
    }
    else if (strcmp(name, "atime") == 0) {
        if (self->finfo->filetype == APR_NOFILE) {
            Py_INCREF(Py_None);
            return Py_None;
        }
        if (self->finfo->valid & APR_FINFO_ATIME)
            /* apr times are in microseconds; scaled to (truncated)
               seconds here */
            return PyLong_FromLong(self->finfo->atime*0.000001);
    }
    else if (strcmp(name, "mtime") == 0) {
        if (self->finfo->filetype == APR_NOFILE) {
            Py_INCREF(Py_None);
            return Py_None;
        }
        if (self->finfo->valid & APR_FINFO_MTIME)
            return PyLong_FromLong(self->finfo->mtime*0.000001);
    }
    else if (strcmp(name, "ctime") == 0) {
        if (self->finfo->filetype == APR_NOFILE) {
            Py_INCREF(Py_None);
            return Py_None;
        }
        if (self->finfo->valid & APR_FINFO_CTIME)
            return PyLong_FromLong(self->finfo->ctime*0.000001);
    }
    else if (strcmp(name, "name") == 0) {
        if (self->finfo->filetype == APR_NOFILE) {
            Py_INCREF(Py_None);
            return Py_None;
        }
        if (self->finfo->valid & APR_FINFO_NAME)
            return MpBytesOrUnicode_FromString(self->finfo->name);
    }
    else {
        PyErr_Format(PyExc_AttributeError,
                     "class 'mp_finfo' has no attribute '%.400s'", name);
        return NULL;
    }

    /* attribute is known but its value is unavailable */
    Py_INCREF(Py_None);
    return Py_None;
}

static PyObject*
+finfoseq_item(PyObject *o, Py_ssize_t i) +{ + finfoobject *self = (finfoobject *)o; + if (i < 0 || i >= 12) { + PyErr_SetString(PyExc_IndexError, "tuple index out of range"); + return NULL; + } + + switch (i) { + case 0: { + return finfo_getattr(self, "protection"); + } + case 1: { + return finfo_getattr(self, "inode"); + } + case 2: { + return finfo_getattr(self, "device"); + } + case 3: { + return finfo_getattr(self, "nlink"); + } + case 4: { + return finfo_getattr(self, "user"); + } + case 5: { + return finfo_getattr(self, "group"); + } + case 6: { + return finfo_getattr(self, "size"); + } + case 7: { + return finfo_getattr(self, "atime"); + } + case 8: { + return finfo_getattr(self, "mtime"); + } + case 9: { + return finfo_getattr(self, "ctime"); + } + case 10: { + return finfo_getattr(self, "fname"); + } + case 11: { + return finfo_getattr(self, "name"); + } + case 12: { + return finfo_getattr(self, "filetype"); + } + } + + Py_INCREF(Py_None); + return Py_None; +} + +static PySequenceMethods finfoseq_as_sequence = { + 0, + 0, /* sq_concat */ + 0, /* sq_repeat */ + finfoseq_item, /* sq_item */ + 0, /* sq_slice */ + 0, /* sq_ass_item */ + 0, /* sq_ass_slice */ + 0, /* sq_contains */ +}; + +/** + ** finfo_repr + ** + * + */ + +static PyObject *finfo_repr(finfoobject *self) +{ + PyObject *s = PyBytes_FromString("{"); + PyObject *t = NULL; + + PyBytes_ConcatAndDel(&s, PyBytes_FromString("'fname': ")); + t = finfo_getattr(self, "fname"); + PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t)); + Py_XDECREF(t); + + PyBytes_ConcatAndDel(&s, PyBytes_FromString(", 'filetype': ")); + t = finfo_getattr(self, "filetype"); + PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t)); + Py_XDECREF(t); + + PyBytes_ConcatAndDel(&s, PyBytes_FromString(", 'valid': ")); + t = finfo_getattr(self, "valid"); + PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t)); + Py_XDECREF(t); + + PyBytes_ConcatAndDel(&s, PyBytes_FromString(", 'protection': ")); + t = finfo_getattr(self, "protection"); + 
PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t)); + Py_XDECREF(t); + + PyBytes_ConcatAndDel(&s, PyBytes_FromString(", 'user': ")); + t = finfo_getattr(self, "user"); + PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t)); + Py_XDECREF(t); + + PyBytes_ConcatAndDel(&s, PyBytes_FromString(", 'group': ")); + t = finfo_getattr(self, "group"); + PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t)); + Py_XDECREF(t); + + PyBytes_ConcatAndDel(&s, PyBytes_FromString(", 'size': ")); + t = finfo_getattr(self, "size"); + PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t)); + Py_XDECREF(t); + + PyBytes_ConcatAndDel(&s, PyBytes_FromString(", 'inode': ")); + t = finfo_getattr(self, "inode"); + PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t)); + Py_XDECREF(t); + + PyBytes_ConcatAndDel(&s, PyBytes_FromString(", 'device': ")); + t = finfo_getattr(self, "device"); + PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t)); + Py_XDECREF(t); + + PyBytes_ConcatAndDel(&s, PyBytes_FromString(", 'nlink': ")); + t = finfo_getattr(self, "nlink"); + PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t)); + Py_XDECREF(t); + + PyBytes_ConcatAndDel(&s, PyBytes_FromString(", 'atime': ")); + t = finfo_getattr(self, "atime"); + PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t)); + Py_XDECREF(t); + + PyBytes_ConcatAndDel(&s, PyBytes_FromString(", 'mtime': ")); + t = finfo_getattr(self, "mtime"); + PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t)); + Py_XDECREF(t); + + PyBytes_ConcatAndDel(&s, PyBytes_FromString(", 'ctime': ")); + t = finfo_getattr(self, "ctime"); + PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t)); + Py_XDECREF(t); + + PyBytes_ConcatAndDel(&s, PyBytes_FromString(", 'name': ")); + t = finfo_getattr(self, "name"); + PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t)); + Py_XDECREF(t); + + PyBytes_ConcatAndDel(&s, PyBytes_FromString("}")); + +#if PY_MAJOR_VERSION < 3 + return s; +#else + { + PyObject *str = PyUnicode_FromString(PyBytes_AS_STRING(s)); + Py_DECREF(s); + return str; + } +#endif +} + 
/* mp_finfo: attribute access via finfo_getattr, os.stat-like
   sequence access via finfoseq_as_sequence */
PyTypeObject MpFinfo_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "mp_finfo",                         /* tp_name */
    sizeof(finfoobject),                /* tp_basicsize */
    0,                                  /* tp_itemsize */
    (destructor)finfo_dealloc,          /* tp_dealloc */
    0,                                  /* tp_print */
    (getattrfunc)finfo_getattr,         /* tp_getattr*/
    0,                                  /* tp_setattr*/
    0,                                  /* tp_compare */
    (reprfunc)finfo_repr,               /* tp_repr*/
    0,                                  /* tp_as_number */
    &finfoseq_as_sequence,              /* tp_as_sequence */
    0,                                  /* tp_as_mapping */
    0,                                  /* tp_hash */
};
diff --git a/src/hlist.c b/src/hlist.c
new file mode 100644
index 0000000..cae10e7
--- /dev/null
+++ b/src/hlist.c
@@ -0,0 +1,151 @@
/*
 * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy
 * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License.  You
 * may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 *
 * Originally developed by Gregory Trubetskoy.
 *
 *
 * hlist.c
 *
 *
 * See accompanying documentation and source code comments
 * for details.
 *
 */

#include "mod_python.h"

/**
 ** hlist_new
 **
 *  Start a new list.
+ */ + +hl_entry *hlist_new(apr_pool_t *p, const char *h, const char *d, + char d_is_fnmatch, char d_is_location, + ap_regex_t *regex, const char silent) + +{ + hl_entry *hle; + + hle = (hl_entry *)apr_pcalloc(p, sizeof(hl_entry)); + + hle->handler = h; + hle->directory = d; + hle->d_is_fnmatch = d_is_fnmatch; + hle->d_is_location = d_is_location; + hle->regex = regex; + hle->silent = silent; + + return hle; +} + +/** + ** hlist_append + ** + * Appends an hl_entry to a list identified by hle, + * and returns the new tail. This func will skip + * to the tail of the list. + * If hle is NULL, a new list is created. + */ + +hl_entry *hlist_append(apr_pool_t *p, hl_entry *hle, const char * h, + const char *d, char d_is_fnmatch, char d_is_location, + ap_regex_t *regex, const char silent) + +{ + hl_entry *nhle; + + /* find tail */ + while (hle && hle->next) + hle = hle->next; + + nhle = (hl_entry *)apr_pcalloc(p, sizeof(hl_entry)); + + nhle->handler = h; + nhle->directory = d; + nhle->d_is_fnmatch = d_is_fnmatch; + nhle->d_is_location = d_is_location; + nhle->regex = regex; + nhle->silent = silent; + + if (hle) + hle->next = nhle; + + return nhle; +} + +/** + ** hlist_copy + ** + */ + +hl_entry *hlist_copy(apr_pool_t *p, const hl_entry *hle) +{ + hl_entry *nhle; + hl_entry *head; + + head = (hl_entry *)apr_pcalloc(p, sizeof(hl_entry)); + head->handler = hle->handler; + head->directory = hle->directory; + head->d_is_fnmatch = hle->d_is_fnmatch; + head->d_is_location = hle->d_is_location; + head->regex = hle->regex; + head->silent = hle->silent; + + hle = hle->next; + nhle = head; + while (hle) { + nhle->next = (hl_entry *)apr_pcalloc(p, sizeof(hl_entry)); + nhle = nhle->next; + nhle->handler = hle->handler; + nhle->directory = hle->directory; + nhle->d_is_fnmatch = hle->d_is_fnmatch; + nhle->d_is_location = hle->d_is_location; + nhle->regex = hle->regex; + hle = hle->next; + } + + return head; +} + +/** + ** hlist_extend + ** + */ + +void hlist_extend(apr_pool_t *p, 
hl_entry *hle1, + const hl_entry *hle2) +{ + if (!hle2) + return; + + /* find tail */ + while (hle1 && hle1->next) + hle1 = hle1->next; + + while (hle2) { + hle1->next = (hl_entry *)apr_pcalloc(p, sizeof(hl_entry)); + hle1 = hle1->next; + hle1->handler = hle2->handler; + hle1->directory = hle2->directory; + hle1->d_is_fnmatch = hle2->d_is_fnmatch; + hle1->d_is_location = hle2->d_is_location; + hle1->regex = hle2->regex; + hle1->silent = hle2->silent; + hle2 = hle2->next; + } +} + diff --git a/src/hlistobject.c b/src/hlistobject.c new file mode 100644 index 0000000..af956c1 --- /dev/null +++ b/src/hlistobject.c @@ -0,0 +1,184 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. + * + * + * hlist.c + * + * + * See accompanying documentation and source code comments + * for details. + * + */ + +#include "mod_python.h" + +/** + ** MpHList_FromHLEntry() + ** + * new list from hl_entry + */ + +PyObject *MpHList_FromHLEntry(hl_entry *hle) +{ + hlistobject *result; + MpHList_Type.ob_type = &PyType_Type; + result = PyObject_New(hlistobject, &MpHList_Type); + if (! 
result) + PyErr_NoMemory(); + + result->head = hle; + + return (PyObject *) result; +} + +/** + ** hlist_next + ** + */ + +static PyObject *hlist_next(hlistobject *self, PyObject *args) +{ + self->head = self->head->next; + + Py_INCREF(Py_None); + return Py_None; +} + +static PyMethodDef hlistmethods[] = { + {"next", (PyCFunction) hlist_next, METH_VARARGS}, + { NULL, NULL } /* sentinel */ +}; + +#define OFF(x) offsetof(hl_entry, x) + +static PyMemberDef hlist_memberlist[] = { + {"handler", T_STRING, OFF(handler), READONLY}, + {"silent", T_INT, OFF(silent), READONLY}, + {"is_location", T_INT, OFF(d_is_location), READONLY}, + {"directory", T_STRING, OFF(directory), READONLY}, + {NULL} /* Sentinel */ +}; + +/** + ** hlist_dealloc + ** + */ + +static void hlist_dealloc(hlistobject *self) +{ + PyObject_Del(self); +} + +/** + ** hlist_getattr + ** + */ + +static PyObject *hlist_getattr(hlistobject *self, char *name) +{ + PyObject *res; + PyMemberDef *md; + + PyMethodDef *ml = hlistmethods; + for (; ml->ml_name != NULL; ml++) { + if (name[0] == ml->ml_name[0] && + strcmp(name+1, ml->ml_name+1) == 0) + return PyCFunction_New(ml, (PyObject*)self); + } + + PyErr_Clear(); + + /* when we are at the end of the list, everything + would return None */ + if (! 
self->head) {
        Py_INCREF(Py_None);
        return Py_None;
    }

    md = find_memberdef(hlist_memberlist, name);
    if (!md) {
        PyErr_SetString(PyExc_AttributeError, name);
        return NULL;
    }

    /* member offsets are into the hl_entry, so resolve against
       self->head rather than self */
    return PyMember_GetOne((char*)self->head, md);
}

/**
 ** hlist_repr
 **
 *  Builds a dict-style representation of the current head entry:
 *  handler, directory (when set), is_location and silent.
 */

static PyObject *hlist_repr(hlistobject *self)
{
    PyObject *t;
    PyObject *s = PyBytes_FromString("{");

    if (self->head->handler) {
        PyBytes_ConcatAndDel(&s, PyBytes_FromString("'handler':"));
        t = PyBytes_FromString(self->head->handler);
        PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t));
        Py_XDECREF(t);
    }
    if (self->head->directory) {
        PyBytes_ConcatAndDel(&s, PyBytes_FromString(",'directory':"));
        t = PyBytes_FromString(self->head->directory);
        PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t));
        Py_XDECREF(t);
    }
    PyBytes_ConcatAndDel(&s, PyBytes_FromString(",'is_location':"));
    if (self->head->d_is_location)
        PyBytes_ConcatAndDel(&s, PyBytes_FromString("True"));
    else
        PyBytes_ConcatAndDel(&s, PyBytes_FromString("False"));
    PyBytes_ConcatAndDel(&s, PyBytes_FromString(",'silent':"));
    if (self->head->silent)
        PyBytes_ConcatAndDel(&s, PyBytes_FromString("1}"));
    else
        PyBytes_ConcatAndDel(&s, PyBytes_FromString("0}"));

#if PY_MAJOR_VERSION < 3
    return s;
#else
    {
        PyObject *str = PyUnicode_FromString(PyBytes_AS_STRING(s));
        Py_DECREF(s);
        return str;
    }
#endif
}

PyTypeObject MpHList_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "mp_hlist",                         /* tp_name */
    sizeof(hlistobject),                /* tp_basicsize */
    0,                                  /* tp_itemsize */
    (destructor) hlist_dealloc,         /* tp_dealloc*/
    0,                                  /* tp_print*/
    (getattrfunc) hlist_getattr,        /* tp_getattr*/
    0,                                  /* tp_setattr*/
    0,                                  /* tp_compare*/
    (reprfunc)hlist_repr,               /* tp_repr*/
    0,                                  /* tp_as_number*/
    0,                                  /* tp_as_sequence*/
    0,                                  /* tp_as_mapping*/
    0,                                  /* tp_hash*/
};


diff --git a/src/include/_apachemodule.h b/src/include/_apachemodule.h
new file mode 100644
index 0000000..061df3e
--- /dev/null
+++
b/src/include/_apachemodule.h @@ -0,0 +1,34 @@ +#ifndef Mp_APACHEMODULE_H +#define Mp_APACHEMODULE_H + +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. + * + * + * apachemodule.h + * + * + * See accompanying documentation and source code comments + * for details. + * + */ + +PyObject *get_ServerReturn(void); +PyMODINIT_FUNC init_apache(void); + +#endif /* !Mp_APACHEMODULE_H */ diff --git a/src/include/_pspmodule.h b/src/include/_pspmodule.h new file mode 100644 index 0000000..4fdc022 --- /dev/null +++ b/src/include/_pspmodule.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + * + */ + +#ifndef __PSP_MODULE_H +#define __PSP_MODULE_H + +#include "Python.h" + +#if PY_MAJOR_VERSION < 3 +#define PyBytes_AsString PyString_AsString +#define PyBytes_FromString PyString_FromString +#define MpBytesOrUnicode_FromString PyString_FromString +#else +#define MpBytesOrUnicode_FromString PyUnicode_FromString +#endif + +#endif /* __PSP_MODULE_H */ + diff --git a/src/include/connobject.h b/src/include/connobject.h new file mode 100644 index 0000000..a4f2fc6 --- /dev/null +++ b/src/include/connobject.h @@ -0,0 +1,58 @@ +#ifndef Mp_CONNOBJECT_H +#define Mp_CONNOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. + * + * + * connobject.h + * + * + */ + +/* + * This is a mapping of a Python object to an Apache table. + * + * This object behaves like a dictionary. Note that the + * underlying table can have duplicate keys, which can never + * happen to a Python dictionary. But this is such a rare thing + * that I can't even think of a possible scenario or implications. 
+ * + */ + + typedef struct connobject { + PyObject_HEAD + conn_rec *conn; + PyObject *base_server; + PyObject *notes; + hlistobject *hlo; + } connobject; + + PyAPI_DATA(PyTypeObject) MpConn_Type; + +#define MpConn_Check(op) (Py_TYPE(op) == &MpConn_Type) + + PyAPI_FUNC(PyObject *) MpConn_FromConn (conn_rec *c); + +#ifdef __cplusplus +} +#endif +#endif /* !Mp_CONNOBJECT_H */ diff --git a/src/include/filterobject.h b/src/include/filterobject.h new file mode 100644 index 0000000..b8151ef --- /dev/null +++ b/src/include/filterobject.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. 
+ * + * + * filterobject.h + * + * + */ + +#ifndef Mp_FILTEROBJECT_H +#define Mp_FILTEROBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + + typedef struct filterobject { + PyObject_HEAD + ap_filter_t *f; + + /* in out refers to the dircetion of data with respect to + filter, not the filter type */ + apr_bucket_brigade *bb_in; + apr_bucket_brigade *bb_out; + + apr_status_t rc; + + int is_input; + ap_input_mode_t mode; + apr_size_t readbytes; + + int closed; + int softspace; + int bytes_written; + + char *handler; + char *dir; + + requestobject *request_obj; + + } filterobject; + + PyAPI_DATA(PyTypeObject) MpFilter_Type; + +#define MpFilter_Check(op) (Py_TYPE(op) == &MpFilter_Type) + + PyAPI_FUNC(PyObject *) + MpFilter_FromFilter (ap_filter_t *f, apr_bucket_brigade *bb_in, + int is_input, ap_input_mode_t mode, + apr_size_t readbytes, char *hadler, char *dir); + +#ifdef __cplusplus +} +#endif +#endif /* !Mp_FILTEROBJECT_H */ diff --git a/src/include/finfoobject.h b/src/include/finfoobject.h new file mode 100644 index 0000000..c052d59 --- /dev/null +++ b/src/include/finfoobject.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. 
+ * + * + * finfoobject.h + * + * + */ + +#ifndef Mp_FINFOOBJECT_H +#define Mp_FINFOOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + + typedef struct finfoobject { + PyObject_HEAD + apr_pool_t *pool; + apr_finfo_t *finfo; + } finfoobject; + + PyAPI_DATA(PyTypeObject) MpFinfo_Type; + +#define MpFinfo_Check(op) (Py_TYPE(op) == &MpFinfo_Type) + + PyAPI_FUNC(PyObject *) MpFinfo_FromFinfo (apr_finfo_t *f); + PyAPI_FUNC(PyObject *) MpFinfo_New (void); + +#ifdef __cplusplus +} +#endif +#endif /* !Mp_FINFOOBJECT_H */ diff --git a/src/include/hlist.h b/src/include/hlist.h new file mode 100644 index 0000000..2666bfc --- /dev/null +++ b/src/include/hlist.h @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. + * + * + * hlist.h + * + * + * See accompanying documentation and source code comments + * for details. 
+ * + */ + +#ifndef Mp_HLIST_H +#define Mp_HLIST_H +#ifdef __cplusplus +extern "C" { +#endif + + /* handler list entry */ + typedef struct hl_entry { + const char *handler; + const char *directory; /* directory or location */ + ap_regex_t *regex; + char d_is_fnmatch; + char d_is_location; + char silent; /* 1 for PythonHandlerModule, where + if a handler is not found in a module, + no error should be reported */ + struct hl_entry *next; + } hl_entry; + + hl_entry *hlist_new(apr_pool_t *p, const char *h, const char *d, + char d_is_fnmatch, char d_is_location, + ap_regex_t *regex, const char silent); + hl_entry *hlist_append(apr_pool_t *p, hl_entry *hle, const char * h, + const char *d, char d_is_fnmatch, char d_is_location, + ap_regex_t *regex, const char silent); + + hl_entry *hlist_copy(apr_pool_t *p, const hl_entry *hle); + void hlist_extend(apr_pool_t *p, hl_entry *hle1, const hl_entry *hle2); + +#ifdef __cplusplus +} +#endif +#endif /* !Mp_HLIST_H */ diff --git a/src/include/hlistobject.h b/src/include/hlistobject.h new file mode 100644 index 0000000..aebbfdf --- /dev/null +++ b/src/include/hlistobject.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. 
+ * + * + * hlistobject.h + * + * + * See accompanying documentation and source code comments + * for details. + * + */ + +#ifndef Mp_HLISTOBJECT_H +#define Mp_HLISTOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + + typedef struct hlist { + PyObject_HEAD + struct hl_entry *head; + } hlistobject; + + PyAPI_DATA(PyTypeObject) MpHList_Type; + +#define MpHList_Check(op) (Py_TYPE(op) == &MpHList_Type) + + PyAPI_FUNC(PyObject *)MpHList_FromHLEntry (hl_entry *hle); + +#ifdef __cplusplus +} +#endif +#endif /* !Mp_HLISTOBJECT_H */ diff --git a/src/include/mod_python.h b/src/include/mod_python.h new file mode 100644 index 0000000..061146b --- /dev/null +++ b/src/include/mod_python.h @@ -0,0 +1,312 @@ +#ifndef Mp_MOD_PYTHON_H +#define Mp_MOD_PYTHON_H + +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. + * + * + * mod_python.h + * + * $Id: mod_python.h 231054 2005-08-09 15:37:04Z jgallacher $ + * + * See accompanying documentation and source code comments + * for details. + * + */ + +/* + * + * + * DO NOT EDIT - DO NOT EDIT - DO NOT EDIT - DO NOT EDIT + * + * + * + * If you are looking at mod_python.h, it is an auto-generated file on + * UNIX. This file is kept around for the Win32 platform which + * does not use autoconf. 
Any changes to mod_python.h must also be + * reflected in mod_python.h.in. + */ + +/* Python headers */ +#include "Python.h" +#include "structmember.h" + +/* Apache headers */ +#include "httpd.h" +#define CORE_PRIVATE +#include "http_config.h" +#include "http_core.h" +#include "http_main.h" +#include "http_connection.h" +#include "http_protocol.h" +#include "http_request.h" +#include "util_script.h" +#include "util_filter.h" +#include "http_log.h" +#include "apr_strings.h" +#include "apr_lib.h" +#include "apr_hash.h" +#include "apr_fnmatch.h" +#include "scoreboard.h" +#include "ap_mpm.h" +#include "ap_mmn.h" +#include "mod_include.h" +#if !defined(OS2) && !defined(WIN32) && !defined(BEOS) && !defined(NETWARE) +#include "unixd.h" +#endif + +#if !AP_MODULE_MAGIC_AT_LEAST(20050127,0) +/* Debian backported ap_regex_t to Apache 2.0 and + * thus made official version checking break. */ +#ifndef AP_REG_EXTENDED +typedef regex_t ap_regex_t; +#define AP_REG_EXTENDED REG_EXTENDED +#define AP_REG_ICASE REG_ICASE +#endif +#endif + +#if defined(WIN32) && !defined(WITH_THREAD) +#error Python threading must be enabled on Windows +#endif + +#if !defined(WIN32) +#include +#endif + +/* pool given to us in ChildInit. 
We use it for + server.register_cleanup() */ +extern apr_pool_t *child_init_pool; + +/* Apache module declaration */ +extern module AP_MODULE_DECLARE_DATA python_module; + +#include "util.h" +#include "hlist.h" +#include "hlistobject.h" +#include "tableobject.h" +#include "serverobject.h" +#include "connobject.h" +#include "_apachemodule.h" +#include "requestobject.h" +#include "filterobject.h" +#include "finfoobject.h" + +/** Things specific to mod_python, as an Apache module **/ + +#if PY_MAJOR_VERSION < 3 + +#define PyBytesObject PyStringObject +#define PyBytes_Check PyString_Check +#define PyBytes_CheckExact PyString_CheckExact +#define PyBytes_FromString PyString_FromString +#define PyBytes_FromStringAndSize PyString_FromStringAndSize +#define PyBytes_AS_STRING PyString_AS_STRING +#define PyBytes_ConcatAndDel PyString_ConcatAndDel +#define PyBytes_Size PyString_Size +#define _PyBytes_Resize _PyString_Resize +#define MpObject_ReprAsBytes PyObject_Repr +#define MpBytesOrUnicode_FromString PyString_FromString + +#ifndef PyVarObject_HEAD_INIT +#define PyVarObject_HEAD_INIT(type, size) \ + PyObject_HEAD_INIT(type) size, +#endif + +#ifndef Py_TYPE +#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) +#endif + +#else + +#define MpBytesOrUnicode_FromString PyUnicode_FromString + +#endif /* PY_MAJOR_VERSION < 3 */ + +#define MP_CONFIG_KEY "mod_python_config" +#define MAIN_INTERPRETER "main_interpreter" +#define FILTER_NAME "MOD_PYTHON" + +/* used in python_directive_handler */ +#define SILENT 1 +#define NOTSILENT 0 + +/* MAX_LOCKS can now be set as a configure option + * ./configure --with-max-locks=INTEGER + */ +#define MAX_LOCKS 8 + +/* MUTEX_DIR can be set as a configure option + * ./configure --with-mutex-dir=/path/to/dir + */ +#define MUTEX_DIR "/tmp" + +/* version stuff */ +extern const char * const mp_git_sha; +extern const int mp_version_major; +extern const int mp_version_minor; +extern const int mp_version_patch; +extern const char * const mp_version_string; 
+extern const char * const mp_version_component; + +/* structure to hold interpreter data */ +typedef struct { + apr_array_header_t * tstates; /* tstates available for use */ + PyInterpreterState *interp; + PyObject *obcallback; +} interpreterdata; + +/* global configuration parameters */ +typedef struct +{ + apr_global_mutex_t **g_locks; + int nlocks; + int parent_pid; +} py_global_config; + +/* structure describing per directory configuration parameters */ +typedef struct { + int authoritative; + char *config_dir; + char d_is_location; + apr_table_t *directives; + apr_table_t *options; + apr_hash_t *hlists; /* hlists for every phase */ + apr_hash_t *in_filters; + apr_hash_t *out_filters; + apr_table_t *imports; /* for PythonImport */ +} py_config; + +/* register_cleanup info */ +typedef struct +{ + request_rec *request_rec; + server_rec *server_rec; + PyObject *handler; + const char *interpreter; + PyObject *data; +} cleanup_info; + +/* request config structure */ +typedef struct +{ + requestobject *request_obj; + apr_hash_t *dynhls; /* dynamically registered handlers + for this request */ + apr_hash_t *in_filters; /* dynamically registered input filters + for this request */ + apr_hash_t *out_filters; /* dynamically registered output filters + for this request */ + +} py_req_config; + +/* filter context */ +typedef struct +{ + char *name; + int transparent; +} python_filter_ctx; + +/* a structure to hold a handler, + used in configuration for filters */ +typedef struct +{ + char *handler; + char *directory; + unsigned d_is_fnmatch : 1; + unsigned d_is_location : 1; + ap_regex_t *regex; +} py_handler; + +apr_status_t python_cleanup(void *data); +PyObject* python_interpreter_name(void); +requestobject *python_get_request_object(request_rec *req, const char *phase); +PyObject *_apache_module_init(); + +APR_DECLARE_OPTIONAL_FN(PyInterpreterState *, mp_acquire_interpreter, (const char *)); +APR_DECLARE_OPTIONAL_FN(void, mp_release_interpreter, ()); 
+APR_DECLARE_OPTIONAL_FN(PyObject *, mp_get_request_object, (request_rec *)); +APR_DECLARE_OPTIONAL_FN(PyObject *, mp_get_server_object, (server_rec *)); +APR_DECLARE_OPTIONAL_FN(PyObject *, mp_get_connection_object, (conn_rec *)); + +/* This macro assigns a C string representation of PyObject *obj to + * char *str. obj must be a Unicode Latin1 or Bytes. It will try its + * best to accomplish this with zero-copy. WARNING - it DECREFs + * (unless obj_is_borrowed) and changes the value of obj when it is + * unicode that must be recoded, so do not use obj afterwards other + * than to DECREF it - it may not be what you expected. You MUST + * Py_DECREF(obj) afterward (even if error), but not before you're + * done with the value of str. Note that if the obj reference was + * borrowed, and without the macro you wouldn't be DECREFing it, you + * should indicate that by setting obj_is_borrowed to 1 and DECREF + * it. If after this macro str is NULL, then a TypeError error has + * been set by the macro. + */ +#if PY_MAJOR_VERSION < 3 +#define PyUnicode_1BYTE_KIND NULL +#define PyUnicode_KIND(str) NULL +#define PyUnicode_1BYTE_DATA(obj) "" +#endif +#define MP_ANYSTR_AS_STR(str, obj, obj_is_borrowed) do { \ + str = NULL; \ + if (PyUnicode_CheckExact(obj)) { \ + if (PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 3 && \ + PyUnicode_KIND(obj) == PyUnicode_1BYTE_KIND) { \ + if (obj_is_borrowed) Py_INCREF(obj); /* so DECREF ok */ \ + str = PyUnicode_1BYTE_DATA(obj); \ + } else { \ + PyObject *latin = PyUnicode_AsLatin1String(obj); \ + if (latin) { \ + str = PyBytes_AsString(latin); /* #define on 2.6 */ \ + if (!obj_is_borrowed) Py_DECREF(obj); \ + obj = latin; /* remember to DECREF me! 
*/ \ + } \ + } \ + } else if (PyBytes_CheckExact(obj)) { /* #define on 2.6 */ \ + str = PyBytes_AsString(obj); /* #define on 2.6 */ \ + if (obj_is_borrowed) Py_INCREF(obj); /* so DECREF ok */ \ + } \ + if (!str) { \ + if (obj_is_borrowed) Py_INCREF(obj); /* so DECREF ok */ \ + PyErr_SetString(PyExc_TypeError, \ + "not an ISO-8859-1 string"); \ + } \ + } while (0) + +#ifndef MpObject_ReprAsBytes +static inline PyObject *MpObject_ReprAsBytes(PyObject *o) { + PyObject *result; + PyObject *ucode = PyObject_Repr(o); + /* we can do this because repr() should never have non-ascii characters XXX (really?) */ + char *c = PyUnicode_1BYTE_DATA(ucode); + if (c[0] == 'b') + result = PyBytes_FromStringAndSize(PyUnicode_1BYTE_DATA(ucode)+1, PyUnicode_GET_LENGTH(ucode)-1); + else + result = PyBytes_FromStringAndSize(PyUnicode_1BYTE_DATA(ucode), PyUnicode_GET_LENGTH(ucode)); + Py_DECREF(ucode); + return result; +} +#endif + +#endif /* !Mp_MOD_PYTHON_H */ + +/* +# makes emacs go into C mode +### Local Variables: +### mode:c +### End: +*/ diff --git a/src/include/mod_python.h.in b/src/include/mod_python.h.in new file mode 100644 index 0000000..4b6f1fb --- /dev/null +++ b/src/include/mod_python.h.in @@ -0,0 +1,312 @@ +#ifndef Mp_MOD_PYTHON_H +#define Mp_MOD_PYTHON_H + +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + * Originally developed by Gregory Trubetskoy. + * + * + * mod_python.h + * + * $Id: mod_python.h 231054 2005-08-09 15:37:04Z jgallacher $ + * + * See accompanying documentation and source code comments + * for details. + * + */ + +/* + * + * + * DO NOT EDIT - DO NOT EDIT - DO NOT EDIT - DO NOT EDIT + * + * + * + * If you are looking at mod_python.h, it is an auto-generated file on + * UNIX. This file is kept around for the Win32 platform which + * does not use autoconf. Any changes to mod_python.h must also be + * reflected in mod_python.h.in. + */ + +/* Python headers */ +#include "Python.h" +#include "structmember.h" + +/* Apache headers */ +#include "httpd.h" +#define CORE_PRIVATE +#include "http_config.h" +#include "http_core.h" +#include "http_main.h" +#include "http_connection.h" +#include "http_protocol.h" +#include "http_request.h" +#include "util_script.h" +#include "util_filter.h" +#include "http_log.h" +#include "apr_strings.h" +#include "apr_lib.h" +#include "apr_hash.h" +#include "apr_fnmatch.h" +#include "scoreboard.h" +#include "ap_mpm.h" +#include "ap_mmn.h" +#include "mod_include.h" +#if !defined(OS2) && !defined(WIN32) && !defined(BEOS) && !defined(NETWARE) +#include "unixd.h" +#endif + +#if !AP_MODULE_MAGIC_AT_LEAST(20050127,0) +/* Debian backported ap_regex_t to Apache 2.0 and + * thus made official version checking break. */ +#ifndef AP_REG_EXTENDED +typedef regex_t ap_regex_t; +#define AP_REG_EXTENDED REG_EXTENDED +#define AP_REG_ICASE REG_ICASE +#endif +#endif + +#if defined(WIN32) && !defined(WITH_THREAD) +#error Python threading must be enabled on Windows +#endif + +#if !defined(WIN32) +#include +#endif + +/* pool given to us in ChildInit. 
We use it for + server.register_cleanup() */ +extern apr_pool_t *child_init_pool; + +/* Apache module declaration */ +extern module AP_MODULE_DECLARE_DATA python_module; + +#include "util.h" +#include "hlist.h" +#include "hlistobject.h" +#include "tableobject.h" +#include "serverobject.h" +#include "connobject.h" +#include "_apachemodule.h" +#include "requestobject.h" +#include "filterobject.h" +#include "finfoobject.h" + +/** Things specific to mod_python, as an Apache module **/ + +#if PY_MAJOR_VERSION < 3 + +#define PyBytesObject PyStringObject +#define PyBytes_Check PyString_Check +#define PyBytes_CheckExact PyString_CheckExact +#define PyBytes_FromString PyString_FromString +#define PyBytes_FromStringAndSize PyString_FromStringAndSize +#define PyBytes_AS_STRING PyString_AS_STRING +#define PyBytes_ConcatAndDel PyString_ConcatAndDel +#define PyBytes_Size PyString_Size +#define _PyBytes_Resize _PyString_Resize +#define MpObject_ReprAsBytes PyObject_Repr +#define MpBytesOrUnicode_FromString PyString_FromString + +#ifndef PyVarObject_HEAD_INIT +#define PyVarObject_HEAD_INIT(type, size) \ + PyObject_HEAD_INIT(type) size, +#endif + +#ifndef Py_TYPE +#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) +#endif + +#else + +#define MpBytesOrUnicode_FromString PyUnicode_FromString + +#endif /* PY_MAJOR_VERSION < 3 */ + +#define MP_CONFIG_KEY "mod_python_config" +#define MAIN_INTERPRETER "main_interpreter" +#define FILTER_NAME "MOD_PYTHON" + +/* used in python_directive_handler */ +#define SILENT 1 +#define NOTSILENT 0 + +/* MAX_LOCKS can now be set as a configure option + * ./configure --with-max-locks=INTEGER + */ +#define MAX_LOCKS @MAX_LOCKS@ + +/* MUTEX_DIR can be set as a configure option + * ./configure --with-mutex-dir=/path/to/dir + */ +#define MUTEX_DIR "@MUTEX_DIR@" + +/* version stuff */ +extern const char * const mp_git_sha; +extern const int mp_version_major; +extern const int mp_version_minor; +extern const int mp_version_patch; +extern const char * const 
mp_version_string; +extern const char * const mp_version_component; + +/* structure to hold interpreter data */ +typedef struct { + apr_array_header_t * tstates; /* tstates available for use */ + PyInterpreterState *interp; + PyObject *obcallback; +} interpreterdata; + +/* global configuration parameters */ +typedef struct +{ + apr_global_mutex_t **g_locks; + int nlocks; + int parent_pid; +} py_global_config; + +/* structure describing per directory configuration parameters */ +typedef struct { + int authoritative; + char *config_dir; + char d_is_location; + apr_table_t *directives; + apr_table_t *options; + apr_hash_t *hlists; /* hlists for every phase */ + apr_hash_t *in_filters; + apr_hash_t *out_filters; + apr_table_t *imports; /* for PythonImport */ +} py_config; + +/* register_cleanup info */ +typedef struct +{ + request_rec *request_rec; + server_rec *server_rec; + PyObject *handler; + const char *interpreter; + PyObject *data; +} cleanup_info; + +/* request config structure */ +typedef struct +{ + requestobject *request_obj; + apr_hash_t *dynhls; /* dynamically registered handlers + for this request */ + apr_hash_t *in_filters; /* dynamically registered input filters + for this request */ + apr_hash_t *out_filters; /* dynamically registered output filters + for this request */ + +} py_req_config; + +/* filter context */ +typedef struct +{ + char *name; + int transparent; +} python_filter_ctx; + +/* a structure to hold a handler, + used in configuration for filters */ +typedef struct +{ + char *handler; + char *directory; + unsigned d_is_fnmatch : 1; + unsigned d_is_location : 1; + ap_regex_t *regex; +} py_handler; + +apr_status_t python_cleanup(void *data); +PyObject* python_interpreter_name(void); +requestobject *python_get_request_object(request_rec *req, const char *phase); +PyObject *_apache_module_init(); + +APR_DECLARE_OPTIONAL_FN(PyInterpreterState *, mp_acquire_interpreter, (const char *)); +APR_DECLARE_OPTIONAL_FN(void, mp_release_interpreter, ()); 
+APR_DECLARE_OPTIONAL_FN(PyObject *, mp_get_request_object, (request_rec *)); +APR_DECLARE_OPTIONAL_FN(PyObject *, mp_get_server_object, (server_rec *)); +APR_DECLARE_OPTIONAL_FN(PyObject *, mp_get_connection_object, (conn_rec *)); + +/* This macro assigns a C string representation of PyObject *obj to + * char *str. obj must be a Unicode Latin1 or Bytes. It will try its + * best to accomplish this with zero-copy. WARNING - it DECREFs + * (unless obj_is_borrowed) and changes the value of obj when it is + * unicode that must be recoded, so do not use obj afterwards other + * than to DECREF it - it may not be what you expected. You MUST + * Py_DECREF(obj) afterward (even if error), but not before you're + * done with the value of str. Note that if the obj reference was + * borrowed, and without the macro you wouldn't be DECREFing it, you + * should indicate that by setting obj_is_borrowed to 1 and DECREF + * it. If after this macro str is NULL, then a TypeError error has + * been set by the macro. + */ +#if PY_MAJOR_VERSION < 3 +#define PyUnicode_1BYTE_KIND NULL +#define PyUnicode_KIND(str) NULL +#define PyUnicode_1BYTE_DATA(obj) "" +#endif +#define MP_ANYSTR_AS_STR(str, obj, obj_is_borrowed) do { \ + str = NULL; \ + if (PyUnicode_CheckExact(obj)) { \ + if (PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 3 && \ + PyUnicode_KIND(obj) == PyUnicode_1BYTE_KIND) { \ + if (obj_is_borrowed) Py_INCREF(obj); /* so DECREF ok */ \ + str = PyUnicode_1BYTE_DATA(obj); \ + } else { \ + PyObject *latin = PyUnicode_AsLatin1String(obj); \ + if (latin) { \ + str = PyBytes_AsString(latin); /* #define on 2.6 */ \ + if (!obj_is_borrowed) Py_DECREF(obj); \ + obj = latin; /* remember to DECREF me! 
*/ \ + } \ + } \ + } else if (PyBytes_CheckExact(obj)) { /* #define on 2.6 */ \ + str = PyBytes_AsString(obj); /* #define on 2.6 */ \ + if (obj_is_borrowed) Py_INCREF(obj); /* so DECREF ok */ \ + } \ + if (!str) { \ + if (obj_is_borrowed) Py_INCREF(obj); /* so DECREF ok */ \ + PyErr_SetString(PyExc_TypeError, \ + "not an ISO-8859-1 string"); \ + } \ + } while (0) + +#ifndef MpObject_ReprAsBytes +static inline PyObject *MpObject_ReprAsBytes(PyObject *o) { + PyObject *result; + PyObject *ucode = PyObject_Repr(o); + /* we can do this because repr() should never have non-ascii characters XXX (really?) */ + char *c = PyUnicode_1BYTE_DATA(ucode); + if (c[0] == 'b') + result = PyBytes_FromStringAndSize(PyUnicode_1BYTE_DATA(ucode)+1, PyUnicode_GET_LENGTH(ucode)-1); + else + result = PyBytes_FromStringAndSize(PyUnicode_1BYTE_DATA(ucode), PyUnicode_GET_LENGTH(ucode)); + Py_DECREF(ucode); + return result; +} +#endif + +#endif /* !Mp_MOD_PYTHON_H */ + +/* +# makes emacs go into C mode +### Local Variables: +### mode:c +### End: +*/ diff --git a/src/include/mp_version.h b/src/include/mp_version.h new file mode 100644 index 0000000..4b3a494 --- /dev/null +++ b/src/include/mp_version.h @@ -0,0 +1,14 @@ + +/* + * This is the one and only source of version information. + * + * Do not include this file directly, it is available via + * auto-generated version.c, which also includes the git sha. + */ + +#define MP_VERSION_MAJOR 3 +#define MP_VERSION_MINOR 5 +#define MP_VERSION_PATCH 0 + +#define STR(x) #x +#define MP_VERSION_STRING(maj, min, p) STR(maj) "." STR(min) "." 
STR(p) diff --git a/src/include/psp_flex.h b/src/include/psp_flex.h new file mode 100644 index 0000000..888440f --- /dev/null +++ b/src/include/psp_flex.h @@ -0,0 +1,340 @@ +#ifndef yyHEADER_H +#define yyHEADER_H 1 +#define yyIN_HEADER 1 + +#line 6 "include/psp_flex.h" + +#line 8 "include/psp_flex.h" + +#define YY_INT_ALIGNED short int + +/* A lexical scanner generated by flex */ + +#define FLEX_SCANNER +#define YY_FLEX_MAJOR_VERSION 2 +#define YY_FLEX_MINOR_VERSION 5 +#define YY_FLEX_SUBMINOR_VERSION 35 +#if YY_FLEX_SUBMINOR_VERSION > 0 +#define FLEX_BETA +#endif + +/* First, we deal with platform-specific or compiler-specific issues. */ + +/* begin standard C headers. */ +#include +#include +#include +#include + +/* end standard C headers. */ + +/* flex integer type definitions */ + +#ifndef FLEXINT_H +#define FLEXINT_H + +/* C99 systems have . Non-C99 systems may or may not. */ + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + +/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h, + * if you want the limit (max/min) macros for int types. + */ +#ifndef __STDC_LIMIT_MACROS +#define __STDC_LIMIT_MACROS 1 +#endif + +#include +typedef int8_t flex_int8_t; +typedef uint8_t flex_uint8_t; +typedef int16_t flex_int16_t; +typedef uint16_t flex_uint16_t; +typedef int32_t flex_int32_t; +typedef uint32_t flex_uint32_t; +#else +typedef signed char flex_int8_t; +typedef short int flex_int16_t; +typedef int flex_int32_t; +typedef unsigned char flex_uint8_t; +typedef unsigned short int flex_uint16_t; +typedef unsigned int flex_uint32_t; +#endif /* ! C99 */ + +/* Limits of integral types. 
*/ +#ifndef INT8_MIN +#define INT8_MIN (-128) +#endif +#ifndef INT16_MIN +#define INT16_MIN (-32767-1) +#endif +#ifndef INT32_MIN +#define INT32_MIN (-2147483647-1) +#endif +#ifndef INT8_MAX +#define INT8_MAX (127) +#endif +#ifndef INT16_MAX +#define INT16_MAX (32767) +#endif +#ifndef INT32_MAX +#define INT32_MAX (2147483647) +#endif +#ifndef UINT8_MAX +#define UINT8_MAX (255U) +#endif +#ifndef UINT16_MAX +#define UINT16_MAX (65535U) +#endif +#ifndef UINT32_MAX +#define UINT32_MAX (4294967295U) +#endif + +#endif /* ! FLEXINT_H */ + +#ifdef __cplusplus + +/* The "const" storage-class-modifier is valid. */ +#define YY_USE_CONST + +#else /* ! __cplusplus */ + +/* C99 requires __STDC__ to be defined as 1. */ +#if defined (__STDC__) + +#define YY_USE_CONST + +#endif /* defined (__STDC__) */ +#endif /* ! __cplusplus */ + +#ifdef YY_USE_CONST +#define yyconst const +#else +#define yyconst +#endif + +/* An opaque pointer. */ +#ifndef YY_TYPEDEF_YY_SCANNER_T +#define YY_TYPEDEF_YY_SCANNER_T +typedef void* yyscan_t; +#endif + +/* For convenience, these vars (plus the bison vars far below) + are macros in the reentrant scanner. */ +#define yyin yyg->yyin_r +#define yyout yyg->yyout_r +#define yyextra yyg->yyextra_r +#define yyleng yyg->yyleng_r +#define yytext yyg->yytext_r +#define yylineno (YY_CURRENT_BUFFER_LVALUE->yy_bs_lineno) +#define yycolumn (YY_CURRENT_BUFFER_LVALUE->yy_bs_column) +#define yy_flex_debug yyg->yy_flex_debug_r + +/* Size of default input buffer. 
*/ +#ifndef YY_BUF_SIZE +#define YY_BUF_SIZE 16384 +#endif + +#ifndef YY_TYPEDEF_YY_BUFFER_STATE +#define YY_TYPEDEF_YY_BUFFER_STATE +typedef struct yy_buffer_state *YY_BUFFER_STATE; +#endif + +#ifndef YY_TYPEDEF_YY_SIZE_T +#define YY_TYPEDEF_YY_SIZE_T +typedef size_t yy_size_t; +#endif + +#ifndef YY_STRUCT_YY_BUFFER_STATE +#define YY_STRUCT_YY_BUFFER_STATE +struct yy_buffer_state + { + FILE *yy_input_file; + + char *yy_ch_buf; /* input buffer */ + char *yy_buf_pos; /* current position in input buffer */ + + /* Size of input buffer in bytes, not including room for EOB + * characters. + */ + yy_size_t yy_buf_size; + + /* Number of characters read into yy_ch_buf, not including EOB + * characters. + */ + int yy_n_chars; + + /* Whether we "own" the buffer - i.e., we know we created it, + * and can realloc() it to grow it, and should free() it to + * delete it. + */ + int yy_is_our_buffer; + + /* Whether this is an "interactive" input source; if so, and + * if we're using stdio for input, then we want to use getc() + * instead of fread(), to make sure we stop fetching input after + * each newline. + */ + int yy_is_interactive; + + /* Whether we're considered to be at the beginning of a line. + * If so, '^' rules will be active on the next match, otherwise + * not. + */ + int yy_at_bol; + + int yy_bs_lineno; /**< The line count. */ + int yy_bs_column; /**< The column count. */ + + /* Whether to try to fill the input buffer when we reach the + * end of it. 
+ */ + int yy_fill_buffer; + + int yy_buffer_status; + + }; +#endif /* !YY_STRUCT_YY_BUFFER_STATE */ + +void yyrestart (FILE *input_file ,yyscan_t yyscanner ); +void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer ,yyscan_t yyscanner ); +YY_BUFFER_STATE yy_create_buffer (FILE *file,int size ,yyscan_t yyscanner ); +void yy_delete_buffer (YY_BUFFER_STATE b ,yyscan_t yyscanner ); +void yy_flush_buffer (YY_BUFFER_STATE b ,yyscan_t yyscanner ); +void yypush_buffer_state (YY_BUFFER_STATE new_buffer ,yyscan_t yyscanner ); +void yypop_buffer_state (yyscan_t yyscanner ); + +YY_BUFFER_STATE yy_scan_buffer (char *base,yy_size_t size ,yyscan_t yyscanner ); +YY_BUFFER_STATE yy_scan_string (yyconst char *yy_str ,yyscan_t yyscanner ); +YY_BUFFER_STATE yy_scan_bytes (yyconst char *bytes,int len ,yyscan_t yyscanner ); + +void *yyalloc (yy_size_t ,yyscan_t yyscanner ); +void *yyrealloc (void *,yy_size_t ,yyscan_t yyscanner ); +void yyfree (void * ,yyscan_t yyscanner ); + +/* Begin user sect3 */ + +#define yywrap(n) 1 +#define YY_SKIP_YYWRAP + +#define yytext_ptr yytext_r + +#ifdef YY_HEADER_EXPORT_START_CONDITIONS +#define INITIAL 0 +#define TEXT 1 +#define PYCODE 2 +#define INDENT 3 +#define DIR 4 +#define COMMENT 5 + +#endif + +#ifndef YY_NO_UNISTD_H +/* Special case for "unistd.h", since it is non-ANSI. We include it way + * down here because we want the user's section 1 to have been scanned first. + * The user has a chance to override it with an option. + */ +#include +#endif + +#ifndef YY_EXTRA_TYPE +#define YY_EXTRA_TYPE void * +#endif + +int yylex_init (yyscan_t* scanner); + +int yylex_init_extra (YY_EXTRA_TYPE user_defined,yyscan_t* scanner); + +/* Accessor methods to globals. + These are made visible to non-reentrant scanners for convenience. 
*/ + +int yylex_destroy (yyscan_t yyscanner ); + +int yyget_debug (yyscan_t yyscanner ); + +void yyset_debug (int debug_flag ,yyscan_t yyscanner ); + +YY_EXTRA_TYPE yyget_extra (yyscan_t yyscanner ); + +void yyset_extra (YY_EXTRA_TYPE user_defined ,yyscan_t yyscanner ); + +FILE *yyget_in (yyscan_t yyscanner ); + +void yyset_in (FILE * in_str ,yyscan_t yyscanner ); + +FILE *yyget_out (yyscan_t yyscanner ); + +void yyset_out (FILE * out_str ,yyscan_t yyscanner ); + +int yyget_leng (yyscan_t yyscanner ); + +char *yyget_text (yyscan_t yyscanner ); + +int yyget_lineno (yyscan_t yyscanner ); + +void yyset_lineno (int line_number ,yyscan_t yyscanner ); + +/* Macros after this point can all be overridden by user definitions in + * section 1. + */ + +#ifndef YY_SKIP_YYWRAP +#ifdef __cplusplus +extern "C" int yywrap (yyscan_t yyscanner ); +#else +extern int yywrap (yyscan_t yyscanner ); +#endif +#endif + +#ifndef yytext_ptr +static void yy_flex_strncpy (char *,yyconst char *,int ,yyscan_t yyscanner); +#endif + +#ifdef YY_NEED_STRLEN +static int yy_flex_strlen (yyconst char * ,yyscan_t yyscanner); +#endif + +#ifndef YY_NO_INPUT + +#endif + +/* Amount of stuff to slurp up with each read. */ +#ifndef YY_READ_BUF_SIZE +#define YY_READ_BUF_SIZE 8192 +#endif + +/* Number of entries by which start-condition stack grows. */ +#ifndef YY_START_STACK_INCR +#define YY_START_STACK_INCR 25 +#endif + +/* Default declaration of generated scanner - a define so the user can + * easily add parameters. 
+ */ +#ifndef YY_DECL +#define YY_DECL_IS_OURS 1 + +extern int yylex (yyscan_t yyscanner); + +#define YY_DECL int yylex (yyscan_t yyscanner) +#endif /* !YY_DECL */ + +/* yy_get_previous_state - get the state just before the EOB char was reached */ + +#undef YY_NEW_FILE +#undef YY_FLUSH_BUFFER +#undef yy_set_bol +#undef yy_new_buffer +#undef yy_set_interactive +#undef YY_DO_BEFORE_ACTION + +#ifdef YY_DECL_IS_OURS +#undef YY_DECL_IS_OURS +#undef YY_DECL +#endif + +#line 239 "psp_parser.l" + + +#line 339 "include/psp_flex.h" +#undef yyIN_HEADER +#endif /* yyHEADER_H */ diff --git a/src/include/psp_parser.h b/src/include/psp_parser.h new file mode 100644 index 0000000..d1b1b88 --- /dev/null +++ b/src/include/psp_parser.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + */ + +#ifndef __PSP_PARSER_H +#define __PSP_PARSER_H + +/* This is to remove a compiler warning. Ideall there should be a way to + * to include Python.h before system headers, but because psp_parse.c is + * autogenerated by flex, I couldn't think of a simple way to do it. 
+ */ +#if defined(_POSIX_C_SOURCE) +#undef _POSIX_C_SOURCE +#endif + +#include +#include "psp_string.h" + +#define STATIC_STR(s) s, sizeof(s)-1 + +#define PSP_PG(v) (((psp_parser_t*)yyget_extra(yyscanner))->v) + +typedef struct { +/* PyObject *files; XXX removed until cache is fixed */ + psp_string whitespace; + psp_string pycode; + char * dir; + unsigned is_psp_echo : 1; + unsigned after_colon : 1; + unsigned seen_newline : 1; +} psp_parser_t; + +#endif /* __PSP_PARSER_H */ diff --git a/src/include/psp_string.h b/src/include/psp_string.h new file mode 100644 index 0000000..e925ca6 --- /dev/null +++ b/src/include/psp_string.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ * + * + */ + +#ifndef __PSP_STRING_H +#define __PSP_STRING_H + +#include +#include + +#ifndef PSP_STRING_BLOCK +#define PSP_STRING_BLOCK 256 +#endif + +typedef struct { + size_t allocated; + size_t length; + char *blob; +} psp_string; + +void psp_string_0(psp_string *); +void psp_string_appendl(psp_string *, char *, size_t); +void psp_string_append(psp_string *, char *); +void psp_string_appendc(psp_string *, char); +void psp_string_clear(psp_string *); +void psp_string_free(psp_string *); + +#endif /* __PSP_STRING_H */ + diff --git a/src/include/requestobject.h b/src/include/requestobject.h new file mode 100644 index 0000000..b6aee68 --- /dev/null +++ b/src/include/requestobject.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. 
+ * + * + * requestobject.h + * + * + */ + +#ifndef Mp_REQUESTOBJECT_H +#define Mp_REQUESTOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + + typedef struct requestobject { + PyObject_HEAD + PyObject * dict; + request_rec * request_rec; + PyObject * connection; + PyObject * server; + PyObject * headers_in; + PyObject * headers_out; + PyObject * err_headers_out; + PyObject * subprocess_env; + PyObject * notes; + PyObject * phase; + PyObject * config; + PyObject * options; + char * extension; /* for | .ext syntax */ + int content_type_set; + apr_off_t bytes_queued; + hlistobject * hlo; + char * rbuff; /* read bufer */ + int rbuff_len; /* read buffer size */ + int rbuff_pos; /* position into the buffer */ + PyObject * session; + + } requestobject; + + PyAPI_DATA(PyTypeObject) MpRequest_Type; + +#define MpRequest_Check(op) (Py_TYPE(op) == &MpRequest_Type) + + PyAPI_FUNC(PyObject *) MpRequest_FromRequest (request_rec *r); + +#ifndef ap_is_HTTP_VALID_RESPONSE +#define ap_is_HTTP_VALID_RESPONSE(x) (((x) >= 100)&&((x) < 600)) +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Mp_REQUESTOBJECT_H */ diff --git a/src/include/serverobject.h b/src/include/serverobject.h new file mode 100644 index 0000000..b034e08 --- /dev/null +++ b/src/include/serverobject.h @@ -0,0 +1,47 @@ +#ifndef Mp_SERVEROBJECT_H +#define Mp_SERVEROBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. + * + * + * serverobject.h + * + * + */ + + typedef struct serverobject { + PyObject_HEAD + PyObject *dict; + server_rec *server; + PyObject *next; + } serverobject; + + PyAPI_DATA(PyTypeObject) MpServer_Type; + +#define MpServer_Check(op) (Py_TYPE(op) == &MpServer_Type) + + PyAPI_FUNC(PyObject *) MpServer_FromServer (server_rec *s); + +#ifdef __cplusplus +} +#endif +#endif /* !Mp_SERVEROBJECT_H */ diff --git a/src/include/tableobject.h b/src/include/tableobject.h new file mode 100644 index 0000000..e0680cc --- /dev/null +++ b/src/include/tableobject.h @@ -0,0 +1,60 @@ +#ifndef Mp_TABLEOBJECT_H +#define Mp_TABLEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. + * + * + * tableobject.h + * + * + */ + +/* + * This is a mapping of a Python object to an Apache table. 
+ * + */ + + typedef struct tableobject { + PyObject_VAR_HEAD + apr_table_t *table; + apr_pool_t *pool; + } tableobject; + + PyAPI_DATA(PyTypeObject) MpTable_Type; + PyAPI_DATA(PyTypeObject) MpTableIter_Type; + +#define MpTable_Check(op) (Py_TYPE(op) == &MpTable_Type) + + PyAPI_FUNC(PyObject *) MpTable_FromTable (apr_table_t *t); + PyAPI_FUNC(PyObject *) MpTable_New (void); + +/* #define DEBUG_TABLES 1 */ +#ifdef DEBUG_TABLES +#define TABLE_DEBUG(str) printf("mp_table: %s\n", str) +#else +#define TABLE_DEBUG(str) +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Mp_TABLEOBJECT_H */ diff --git a/src/include/util.h b/src/include/util.h new file mode 100644 index 0000000..495d724 --- /dev/null +++ b/src/include/util.h @@ -0,0 +1,41 @@ +#ifndef Mp_UTIL_H +#define Mp_UTIL_H + +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. + * + * + * util.h + * + * + * See accompanying documentation and source code comments + * for details. 
+ * + */ + +PyObject * tuple_from_array_header(const apr_array_header_t *ah); +PyObject * tuple_from_method_list(const ap_method_list_t *l); +PyObject *tuple_from_finfo(apr_finfo_t *f); +PyObject *tuple_from_apr_uri(apr_uri_t *u); +char * get_addhandler_extensions(request_rec *req); +apr_status_t python_decref(void *object); +PyMemberDef *find_memberdef(const PyMemberDef *mlist, const char *name); +PyObject *cfgtree_walk(ap_directive_t *dir); +PyObject *makesockaddr(struct apr_sockaddr_t *addr); + +#endif /* !Mp_UTIL_H */ diff --git a/src/mod_python.c b/src/mod_python.c new file mode 100644 index 0000000..759b914 --- /dev/null +++ b/src/mod_python.c @@ -0,0 +1,2972 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. + * + * + * mod_python.c + * + * + * See accompanying documentation and source code comments + * for details. + * + */ + +#include "mod_python.h" + +static PyThreadState* global_tstate; + +/* Server object for main server as supplied to python_init(). 
*/ +static server_rec *main_server = NULL; + +/* List of available Python obCallBacks/Interpreters */ +static apr_hash_t *interpreters = NULL; +static apr_pool_t *interp_pool = NULL; +static void release_interpreter(interpreterdata *idata); + +apr_pool_t *child_init_pool = NULL; + +/* Optional functions imported from mod_include when loaded: */ +static APR_OPTIONAL_FN_TYPE(ap_register_include_handler) *optfn_register_include_handler; +static APR_OPTIONAL_FN_TYPE(ap_ssi_get_tag_and_value) *optfn_ssi_get_tag_and_value; +static APR_OPTIONAL_FN_TYPE(ap_ssi_parse_string) *optfn_ssi_parse_string; + +/** + ** make_obcallback + ** + * This function instantiates an obCallBack object. + * NOTE: The obCallBack object is instantiated by Python + * code. This C module calls into Python code which returns + * the reference to obCallBack. + */ + +static PyObject * make_obcallback(const char *name) +{ + + PyObject *m = NULL; + PyObject *obCallBack = NULL; + + /* This makes _apache appear imported, and subsequent + * >>> import _apache + * will not give an error. + */ + _apache_module_init(); + + /* Now execute the equivalent of + * >>> import + * >>> + * in the __main__ module to start up Python. 
+ */ + + m = PyImport_ImportModule("mod_python.apache"); + if (!m) { + PyObject *path; + char *c_path; + + ap_log_error(APLOG_MARK, APLOG_ERR, 0, main_server, + "make_obcallback: could not import mod_python.apache.\n"); + + PyErr_Print(); + fflush(stderr); + + path = PyObject_Repr(PySys_GetObject("path")); + MP_ANYSTR_AS_STR(c_path, path, 0); + + ap_log_error(APLOG_MARK, APLOG_ERR, 0, main_server, + "make_obcallback: Python path being used \"%s\".", + c_path); + + Py_DECREF(path); + + return NULL; + + } else { + + /* Make sure that C and Python code have the same version */ + + const char *mp_dynamic_version = ""; + PyObject *mp = PyImport_ImportModule("mod_python"); + + if (mp) { + PyObject *d = PyModule_GetDict(mp); + PyObject *o = PyDict_GetItemString(d, "mp_version"); + PyObject *f = PyDict_GetItemString(d, "__file__"); + + MP_ANYSTR_AS_STR(mp_dynamic_version, o, 1); + if (!mp_dynamic_version) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, main_server, + "make_obcallback: fatal: mp_dynamic_version is NULL."); + Py_DECREF(o); + Py_DECREF(mp); + return NULL; + } + + if (strcmp(mp_version_string, mp_dynamic_version) != 0) { + char *c_f; + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, main_server, + "WARNING: mod_python version mismatch, expected '%s', found '%s'.", + mp_version_string, mp_dynamic_version); + MP_ANYSTR_AS_STR(c_f, f, 1); + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, main_server, + "WARNING: mod_python modules location '%s'.", c_f); + Py_DECREF(f); /* MP_ANYSTR_AS_STR */ + } + Py_DECREF(o); /* MP_ANYSTR_AS_STR */ + Py_XDECREF(mp); + + /* call init to get obCallBack */ + + if (! 
(obCallBack = PyObject_CallMethod( + m, "init", "sO", name, MpServer_FromServer(main_server)))) { + + ap_log_error(APLOG_MARK, APLOG_ERR, 0, main_server, + "make_obcallback: could not call init()."); + + PyErr_Print(); + fflush(stderr); + } + } else { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, main_server, + "make_obcallback: could not import mod_python"); + } + } + + Py_XDECREF(m); + + return obCallBack; +} + +/** + ** save_interpreter + ** + * Save the interpreter. The argument is a PyThreadState + * belonging to (i.e. tstate->interp) the interpreter we are + * saving. Creates a new interpreterdata and returns a pointer to + * it. + */ + +static interpreterdata *save_interpreter(const char *name, PyThreadState *tstate) +{ + interpreterdata *idata = NULL; + + idata = (interpreterdata *)malloc(sizeof(interpreterdata)); + if (!idata) + return NULL; + idata->tstates = apr_array_make(interp_pool, 128, sizeof(interpreterdata *)); + idata->interp = tstate->interp; + idata->obcallback = NULL; + + apr_hash_set(interpreters, name, APR_HASH_KEY_STRING, idata); + + return idata; +} + +/* + * python_interpreter_name + * + * Get name of current interpreter. Must be called while lock is held. + * This is effectively a shortcut for accessing "apache.interpreter". + */ + +PyObject *python_interpreter_name() +{ + PyObject *m = NULL; + PyObject *d = NULL; + PyObject *o = NULL; + + m = PyImport_ImportModule("mod_python.apache"); + if (m) { + d = PyModule_GetDict(m); + o = PyDict_GetItemString(d, "interpreter"); + + if (o) { + Py_INCREF(o); + Py_DECREF(m); + return o; + } + } + + return 0; +} + +/** + ** get_interpreter + ** + * Get interpreter given its name. + * + * A thread state never outlives a handler; when a phase of a handler + * is done, there is no use for this thread state. 
But there is no + * need to malloc() a new thread state at every request, so when a + * handler is done, in release_interpreter() we push the tstate into + * an array of available tstates for this interpreter. When we need + * another tstate, we pop from this array, and only if that returns + * nothing do we create a new tstate. + * + */ + +static interpreterdata *get_interpreter(const char *name) +{ + PyThreadState *tstate; + interpreterdata *idata = NULL; + + if (! name) + name = MAIN_INTERPRETER; + + /* Py_NewInterpreter requires the GIL held, this is one way to have it so */ + PyEval_RestoreThread(global_tstate); + + idata = apr_hash_get(interpreters, name, APR_HASH_KEY_STRING); + + if (!idata) { + + tstate = Py_NewInterpreter(); + + if (!tstate) { + /* couldn't create an interpreter, this is bad */ + ap_log_error(APLOG_MARK, APLOG_ERR, 0, main_server, + "get_interpreter: Py_NewInterpreter() returned NULL. No more memory?"); + return NULL; + } + + idata = save_interpreter(name, tstate); + if (!idata) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, main_server, + "get_interpreter: save_interpreter() returned NULL. 
 No more memory?"); + return NULL; + } + } + else { + +#ifdef WITH_THREAD + /* use an available tstate, or create a new one */ + PyThreadState ** tstate_pp; + tstate_pp = (PyThreadState **)apr_array_pop(idata->tstates); + if (!tstate_pp) + tstate = PyThreadState_New(idata->interp); + else + tstate = *tstate_pp; +#else + /* always use the first (and only) tstate */ + tstate = idata->interp->tstate_head; +#endif + + PyThreadState_Swap(tstate); + + } + /* At this point GIL is held and tstate is set, we're ready to run */ + + if (!idata->obcallback) { + + idata->obcallback = make_obcallback(name); + + if (!idata->obcallback) { + release_interpreter(idata); + ap_log_error(APLOG_MARK, APLOG_ERR, 0, main_server, + "get_interpreter: no interpreter callback found."); + return NULL; + } + } + + return idata; +} + +/** + ** release_interpreter + ** + * Release interpreter. + */ + +static void release_interpreter(interpreterdata *idata) +{ + PyThreadState *tstate = PyThreadState_Get(); +#ifdef WITH_THREAD + PyThreadState_Clear(tstate); + if (idata) + APR_ARRAY_PUSH(idata->tstates, PyThreadState *) = tstate; + else + PyThreadState_Delete(tstate); + PyEval_ReleaseThread(tstate); +#else + if (!idata) PyThreadState_Delete(tstate); +#endif +} + +/** + ** python_cleanup + ** + * This function gets called for clean ups registered + * with register_cleanup(). Clean ups registered via + * PythonCleanupHandler run in python_cleanup_handler() + * below. + */ + +apr_status_t python_cleanup(void *data) +{ + interpreterdata *idata; + + cleanup_info *ci = (cleanup_info *)data; + + /* get/create interpreter */ + idata = get_interpreter(ci->interpreter); + + if (!idata) { + Py_DECREF(ci->handler); + Py_XDECREF(ci->data); + free((void *)ci->interpreter); + free(ci); + return APR_SUCCESS; /* this is ignored anyway */ + } + + /* + * Call the cleanup function. + */ + if (! 
PyObject_CallFunction(ci->handler, "O", ci->data)) { + PyObject *ptype; + PyObject *pvalue; + PyObject *ptb; + PyObject *handler; + PyObject *stype; + PyObject *svalue; + char *c_handler, *c_svalue, *c_stype; + + PyErr_Fetch(&ptype, &pvalue, &ptb); + handler = PyObject_Str(ci->handler); + stype = PyObject_Str(ptype); + svalue = PyObject_Str(pvalue); + + Py_XDECREF(ptype); + Py_XDECREF(pvalue); + Py_XDECREF(ptb); + + MP_ANYSTR_AS_STR(c_handler, handler, 0); + if (!c_handler) c_handler = ""; + MP_ANYSTR_AS_STR(c_svalue, svalue, 0); + if (!c_svalue) c_svalue = ""; + MP_ANYSTR_AS_STR(c_stype, stype, 0); + if (!c_stype) c_stype = ""; + + if (ci->request_rec) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, + ci->request_rec, + "python_cleanup: Error calling cleanup object %s", + c_handler); + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, + ci->request_rec, + " %s: %s", c_stype, c_svalue); + } + else { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, + ci->server_rec, + "python_cleanup: Error calling cleanup object %s", + c_handler); + ap_log_error(APLOG_MARK, APLOG_ERR, 0, + ci->server_rec, + " %s: %s", c_stype, c_svalue); + } + + Py_DECREF(handler); + Py_DECREF(stype); + Py_DECREF(svalue); + } + + Py_DECREF(ci->handler); + Py_DECREF(ci->data); + free((void *)ci->interpreter); + free(ci); + + release_interpreter(idata); + + return APR_SUCCESS; +} + +static apr_status_t init_mutexes(server_rec *s, apr_pool_t *p, py_global_config *glb) +{ + int max_threads = 0; + int max_procs = 0; + int is_threaded = 0; + int is_forked = 0; + int max_clients; + int locks; + int n; + const char *val; + const char *mutex_dir; + py_config *conf; + + conf = (py_config *) ap_get_module_config(s->module_config, + &python_module); + + /* figure out maximum possible concurrent connections */ + /* MAX_DAEMON_USED seems to account for MaxClients, as opposed to + MAX_DAEMONS, which is ServerLimit + */ + ap_mpm_query(AP_MPMQ_IS_THREADED, &is_threaded); + if (is_threaded != AP_MPMQ_NOT_SUPPORTED) { + 
ap_mpm_query(AP_MPMQ_MAX_THREADS, &max_threads); + } + ap_mpm_query(AP_MPMQ_IS_FORKED, &is_forked); + if (is_forked != AP_MPMQ_NOT_SUPPORTED) { + /* XXX This looks strange, and it is. prefork.c seems to use + MAX_DAEMON_USED the same way that worker.c uses + MAX_DAEMONS (prefork is wrong IMO) */ + ap_mpm_query(AP_MPMQ_MAX_DAEMON_USED, &max_procs); + if (max_procs == -1) { + ap_mpm_query(AP_MPMQ_MAX_DAEMONS, &max_procs); + } + } + max_clients = (((max_threads <= 0) ? 1 : max_threads) * + ((max_procs <= 0) ? 1 : max_procs)); + + /* On some systems the locking mechanism chosen uses valuable + system resources, notably on RH 8 it will use sysv ipc for + which Linux by default provides only 128 semaphores + system-wide, and on many other systems flock is used, which + results in a relatively large number of open files. + + The maximum number of locks can be specified at + compile time using "./configure --with-max-locks value" or + at run time with "PythonOption mod_python.mutex_locks value". + + If the PythonOption directive is used, it must be in a + server config context, otherwise it will be ignored. + + The optimal number of necessary locks is not clear, perhaps a + small number is more than sufficient - if someone took the + time to run some research on this, that'd be most welcome! + */ + val = apr_table_get(conf->options, "mod_python.mutex_locks"); + if (val) { + locks = atoi(val); + } else { + locks = MAX_LOCKS; + } + + locks = (max_clients > locks) ? locks : max_clients; + + ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s, + "mod_python: Creating %d session mutexes based " + "on %d max processes and %d max threads.", + locks, max_procs, max_threads); + + glb->g_locks = (apr_global_mutex_t **) + apr_palloc(p, locks * sizeof(apr_global_mutex_t *)); + glb->nlocks = locks; + glb->parent_pid = getpid(); + +#if !defined(OS2) && !defined(WIN32) && !defined(BEOS) && !defined(NETWARE) + /* On some sytems a directory for the mutex lock files is required. 
+ This mutex directory can be specified at compile time using + "./configure --with-mutex-dir value" or at run time with + "PythonOption mod_python.mutex_directory value". + + If the PythonOption directive is used, it must be in a + server config context, otherwise it will be ignored. + + XXX Should we check if mutex_dir exists and has the correct + permissions? + */ + mutex_dir = apr_table_get(conf->options, "mod_python.mutex_directory"); + if (!mutex_dir) + mutex_dir = MUTEX_DIR; + + ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s, + "mod_python: using mutex_directory %s ", + mutex_dir); +#endif + + for (n=0; n<locks; n++) { + apr_status_t rc; + apr_global_mutex_t **mutex = glb->g_locks; + +#if !defined(OS2) && !defined(WIN32) && !defined(BEOS) && !defined(NETWARE) + char fname[255]; + /* XXX What happens if len(mutex_dir) > 255 - len(mpmtx%d%d)? */ + snprintf(fname, 255, "%s/mpmtx%d%d", mutex_dir, glb->parent_pid, n); +#else + char *fname = NULL; +#endif + rc = apr_global_mutex_create(&mutex[n], fname, APR_LOCK_DEFAULT, + p); + if (rc != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_ERR, rc, s, + "mod_python: Failed to create global mutex %d of %d (%s).", + n, locks, (!fname) ? 
"" : fname); + if (n > 1) { + /* we were able to create at least two, so lets just print a + warning/hint and proceed + */ + ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, + "mod_python: We can probably continue, but with diminished ability " + "to process session locks."); + ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, + "mod_python: Hint: On Linux, the problem may be the number of " + "available semaphores, check 'sysctl kernel.sem'"); + /* now free two locks so that if there is another + module or two that wants a lock, it will be ok */ + apr_global_mutex_destroy(mutex[n-1]); + glb->nlocks = n-1; + if (n > 2) { + apr_global_mutex_destroy(mutex[n-2]); + glb->nlocks = n-2; + } + break; + + } + else { + return rc; + } + } + else { + +#if !defined(OS2) && !defined(WIN32) && !defined(BEOS) && !defined(NETWARE) +#if AP_MODULE_MAGIC_AT_LEAST(20081201,0) + ap_unixd_set_global_mutex_perms(mutex[n]); +#else + if (!geteuid()) { + chown(fname, unixd_config.user_id, -1); + unixd_set_global_mutex_perms(mutex[n]); + } +#endif +#endif + } + } + return APR_SUCCESS; +} + +static apr_status_t reinit_mutexes(server_rec *s, apr_pool_t *p, py_global_config *glb) +{ + int n; + +#if !defined(OS2) && !defined(WIN32) && !defined(BEOS) && !defined(NETWARE) + /* Determine the directory to use for mutex lock files. + See init_mutexes function for more details. 
+ */ + const char *mutex_dir; + py_config *conf; + + conf = (py_config *) ap_get_module_config(s->module_config, + &python_module); + mutex_dir = apr_table_get(conf->options, "mod_python.mutex_directory"); + if (!mutex_dir) + mutex_dir = MUTEX_DIR; +#endif + + for (n=0; n< glb->nlocks; n++) { + apr_status_t rc; + apr_global_mutex_t **mutex = glb->g_locks; + +#if !defined(OS2) && !defined(WIN32) && !defined(BEOS) && !defined(NETWARE) + char fname[255]; + snprintf(fname, 255, "%s/mpmtx%d%d", mutex_dir, glb->parent_pid, n); +#else + char *fname = NULL; +#endif + rc = apr_global_mutex_child_init(&mutex[n], fname, p); + + if (rc != APR_SUCCESS) { + ap_log_error(APLOG_MARK, APLOG_STARTUP, rc, s, + "mod_python: Failed to reinit global mutex %s.", + (!fname) ? "" : fname); + return rc; + } + } + return APR_SUCCESS; +} + +/** + ** python_create_global_config + ** + * This creates the part of config that survives + * server restarts + * + */ + +static py_global_config *python_create_global_config(server_rec *s) +{ + apr_pool_t *pool = s->process->pool; + py_global_config *glb; + + /* do we already have it in s->process->pool? */ + apr_pool_userdata_get((void **)&glb, MP_CONFIG_KEY, pool); + + if (glb) { + return glb; + } + + /* otherwise, create it */ + + glb = (py_global_config *)apr_palloc(pool, sizeof(*glb)); + + apr_pool_userdata_set(glb, MP_CONFIG_KEY, + apr_pool_cleanup_null, + pool); + + return glb; +} + +/* + * mp_acquire_interpreter() + * + * Exported function for acquiring named interpreter. + */ + +PyInterpreterState *mp_acquire_interpreter(const char *name) +{ + interpreterdata *idata; + + idata = get_interpreter(name); + + return idata->interp; +} + +/* + * mp_release_interpreter() + * + * Exported function for releasing acquired interpreter. + * + */ + +void mp_release_interpreter(void) +{ + release_interpreter(NULL); +} + +/* + * mp_get_request_object(request_rec *req) + * + * Exported function for obtaining wrapper for request object. 
+ * + */ + +PyObject *mp_get_request_object(request_rec *req) +{ + requestobject *request_obj; + + request_obj = python_get_request_object(req, 0); + + return (PyObject *)request_obj; +} + +/* + * mp_get_server_object(server_rec *srv) + * + * Exported function for obtaining wrapper for server object. + * + */ + +PyObject *mp_get_server_object(server_rec *srv) +{ + return (PyObject *)MpServer_FromServer(srv); +} + +/* + * mp_get_connection_object(conn_rec *conn) + * + * Exported function for obtaining wrapper for connection object. + * + */ + +PyObject *mp_get_connection_object(conn_rec *conn) +{ + return (PyObject *)MpConn_FromConn(conn); +} + +/** + ** python_init() + ** + * Called by Apache at mod_python initialization time. + */ + +static int python_init(apr_pool_t *p, apr_pool_t *ptemp, + apr_pool_t *plog, server_rec *s) +{ + char buff[255]; + void *data; + py_global_config *glb; + const char *userdata_key = "python_init"; + apr_status_t rc; + + const char *py_compile_version = PY_VERSION; + const char *py_dynamic_version = 0; + + /* The "initialized" flag is a fudge for Mac OS X. It + * addresses two issues. The first is that when an Apache + * "restart" is performed, Apache will unload the mod_python + * shared object, but not the Python framework. This means + * that when "python_init()" is called after the restart, + * the mod_python initialization will not run if only the + * initialized state of Python is checked, because Python + * is already initialized. The second problem is that for + * some older revisions of Mac OS X, even on the initial + * startup of Apache, the "Py_IsInitialized()" function + * would return true and mod_python wouldn't initialize + * itself correctly and would crash. + */ + static int initialized = 0; + +#ifdef WIN32 + /* No need to run python_init() in Win32 parent processes as + * the lack of fork on Win32 means we get no benefit as far as + * inheriting a preinitialized Python interpreter. 
Further, + * upon a restart on Win32 platform the python_init() function + * will be called again in the parent process but without some + * resources allocated by the previous call having being + * released properly, resulting in memory and Win32 resource + * leaks. + */ + if (!getenv("AP_PARENT_PID")) + return OK; +#endif /* WIN32 */ + + apr_pool_userdata_get(&data, userdata_key, s->process->pool); + if (!data) { + apr_pool_userdata_set((const void *)1, userdata_key, + apr_pool_cleanup_null, s->process->pool); + return OK; + } + + /* mod_python version */ + ap_add_version_component(p, mp_version_component); + + py_dynamic_version = strtok((char *)Py_GetVersion(), " "); + + if (strcmp(py_compile_version, py_dynamic_version) != 0) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, + "python_init: Python version mismatch, expected '%s', found '%s'.", + py_compile_version, py_dynamic_version); + ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, + "python_init: Python executable found '%s'.", + Py_GetProgramFullPath()); + ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, + "python_init: Python path being used '%s'.", + Py_GetPath()); + } + + /* Python version */ + sprintf(buff, "Python/%.200s", py_dynamic_version); + ap_add_version_component(p, buff); + + /* cache main server */ + main_server = s; + + /* global config */ + glb = python_create_global_config(s); + if ((rc = init_mutexes(s, p, glb)) != APR_SUCCESS) { + return rc; + } + + /* initialize global Python interpreter if necessary */ + if (initialized == 0 || !Py_IsInitialized()) + { + initialized = 1; + + /* initialze the interpreter */ + Py_Initialize(); + +#ifdef WITH_THREAD + /* create and acquire the interpreter lock */ + PyEval_InitThreads(); +#endif + + /* create the obCallBack dictionary */ + interpreters = apr_hash_make(p); + interp_pool = p; + + if (! interpreters) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, + "python_init: apr_hash_make() failed! 
No more memory?"); + exit(1); + } + + /* save the global_tstate */ + global_tstate = PyEval_SaveThread(); + } + + APR_REGISTER_OPTIONAL_FN(mp_acquire_interpreter); + APR_REGISTER_OPTIONAL_FN(mp_release_interpreter); + APR_REGISTER_OPTIONAL_FN(mp_get_request_object); + APR_REGISTER_OPTIONAL_FN(mp_get_server_object); + APR_REGISTER_OPTIONAL_FN(mp_get_connection_object); + + return OK; +} + +/** + ** python_create_config + ** + * Called by create_dir_config and create_srv_config + */ + +static py_config *python_create_config(apr_pool_t *p) +{ + py_config *conf = + (py_config *) apr_pcalloc(p, sizeof(py_config)); + + conf->authoritative = 1; + conf->d_is_location = 0; + conf->options = apr_table_make(p, 4); + conf->directives = apr_table_make(p, 4); + conf->hlists = apr_hash_make(p); + conf->in_filters = apr_hash_make(p); + conf->out_filters = apr_hash_make(p); + + return conf; +} + + +/** + ** python_create_dir_config + ** + * Allocate memory and initialize the strucure that will + * hold configuration parametes. + * + * This function is called on every hit it seems. + */ + +static void *python_create_dir_config(apr_pool_t *p, char *dir) +{ + py_config *conf = python_create_config(p); + conf->config_dir = dir; + + return conf; +} + +/** + ** python_create_srv_config + ** + * Allocate memory and initialize the strucure that will + * hold configuration parametes. + */ + +static void *python_create_srv_config(apr_pool_t *p, server_rec *srv) +{ + + py_config *conf = python_create_config(p); + + return conf; +} + +/** + ** modpython_table_overlap + ** + * Replaces the apr_table_overlap() function using a specific pool + * for the resulting table. 
+ */ + +static apr_table_t *modpython_table_overlap(apr_pool_t *p, + apr_table_t *current_table, + apr_table_t *new_table) +{ + apr_table_t *merge = apr_table_overlay(p, current_table, new_table); + apr_table_compress(merge, APR_OVERLAP_TABLES_SET); + return merge; +} + +/** + ** python_merge_dir_config + ** + */ + +static void *python_merge_config(apr_pool_t *p, void *current_conf, + void *new_conf) +{ + + py_config *merged_conf = + (py_config *) apr_pcalloc(p, sizeof(py_config)); + py_config *cc = (py_config *) current_conf; + py_config *nc = (py_config *) new_conf; + + apr_hash_index_t *hi; + char *key; + apr_ssize_t klen; + hl_entry *hle; + py_handler *fh; + + /* we basically allow the local configuration to override global, + * by first copying current values and then new values on top + */ + + /** create **/ + merged_conf->hlists = apr_hash_make(p); + merged_conf->in_filters = apr_hash_make(p); + merged_conf->out_filters = apr_hash_make(p); + + /** merge directives and options **/ + merged_conf->directives = modpython_table_overlap(p, cc->directives, + nc->directives); + merged_conf->options = modpython_table_overlap(p, cc->options, + nc->options); + + /** copy current **/ + merged_conf->authoritative = cc->authoritative; + merged_conf->config_dir = apr_pstrdup(p, cc->config_dir); + merged_conf->d_is_location = cc->d_is_location; + + for (hi = apr_hash_first(p, cc->hlists); hi; hi=apr_hash_next(hi)) { + apr_hash_this(hi, (const void **)&key, &klen, (void **)&hle); + apr_hash_set(merged_conf->hlists, key, klen, (void *)hle); + } + + for (hi = apr_hash_first(p, cc->in_filters); hi; hi=apr_hash_next(hi)) { + apr_hash_this(hi, (const void **)&key, &klen, (void **)&fh); + apr_hash_set(merged_conf->in_filters, key, klen, (void *)fh); + } + + for (hi = apr_hash_first(p, cc->out_filters); hi; hi=apr_hash_next(hi)) { + apr_hash_this(hi, (const void **)&key, &klen, (void **)&fh); + apr_hash_set(merged_conf->out_filters, key, klen, (void *)fh); + } + + /** copy new **/ 
+ + if (nc->authoritative != merged_conf->authoritative) + merged_conf->authoritative = nc->authoritative; + if (nc->config_dir) { + merged_conf->config_dir = apr_pstrdup(p, nc->config_dir); + merged_conf->d_is_location = nc->d_is_location; + } + + for (hi = apr_hash_first(p, nc->hlists); hi; hi=apr_hash_next(hi)) { + apr_hash_this(hi, (const void**)&key, &klen, (void **)&hle); + apr_hash_set(merged_conf->hlists, key, klen, (void *)hle); + } + + for (hi = apr_hash_first(p, nc->in_filters); hi; hi=apr_hash_next(hi)) { + apr_hash_this(hi, (const void**)&key, &klen, (void **)&fh); + apr_hash_set(merged_conf->in_filters, key, klen, (void *)fh); + } + + for (hi = apr_hash_first(p, nc->out_filters); hi; hi=apr_hash_next(hi)) { + apr_hash_this(hi, (const void**)&key, &klen, (void **)&fh); + apr_hash_set(merged_conf->out_filters, key, klen, (void *)fh); + } + + return (void *) merged_conf; +} + + +/** + ** python_directive + ** + * Called by non-handler directives + * + */ + +static const char *python_directive(cmd_parms *cmd, void * mconfig, + char *key, const char *val) +{ + py_config *conf; + + conf = (py_config *) mconfig; + apr_table_set(conf->directives, key, val); + + return NULL; +} + +/* returns a parent if it matches the given directive */ +static const ap_directive_t * find_parent(const ap_directive_t *dirp, + const char *what) +{ + while (dirp->parent != NULL) { + dirp = dirp->parent; + + if (strcasecmp(dirp->directive, what) == 0) + return dirp; + } + + return NULL; +} + +#ifdef WIN32 +#define USE_ICASE AP_REG_ICASE +#else +#define USE_ICASE 0 +#endif + +static void determine_context(apr_pool_t *p, const cmd_parms* cmd, + char **dp, char *dx, char *dl, + ap_regex_t **rx) +{ + const ap_directive_t *context = NULL; + const ap_directive_t *directive = NULL; + const char *endp, *arg; + + char *directory = NULL; + char d_is_fnmatch = 0; + char d_is_location = 0; + ap_regex_t *regex = NULL; + + directive = cmd->directive; + + /* Skip any enclosing File directive if 
one exists */ + + if ((context = find_parent(directive, "args; + endp = ap_strrchr_c(arg, '>'); + arg = apr_pstrndup(p, arg, endp - arg); + + directory = ap_getword_conf(p, &arg); + d_is_location = 1; + + if (!strcmp(directory, "~")) { + directory = ap_getword_conf(p, &arg); + regex = ap_pregcomp(p, cmd->path, AP_REG_EXTENDED|USE_ICASE); + } else if (apr_fnmatch_test(directory)) { + d_is_fnmatch = 1; + } + } else if ((context = find_parent(directive, "args; + endp = ap_strrchr_c(arg, '>'); + arg = apr_pstrndup(p, arg, endp - arg); + + directory = ap_getword_conf(p, &arg); + d_is_location = 1; + + regex = ap_pregcomp(p, directory, AP_REG_EXTENDED|USE_ICASE); + } else if ((context = find_parent(directive, "args; + endp = ap_strrchr_c(arg, '>'); + arg = apr_pstrndup(p, arg, endp - arg); + + directory = ap_getword_conf(p, &arg); + + if (!strcmp(directory, "~")) { + directory = ap_getword_conf(p, &arg); + regex = ap_pregcomp(p, cmd->path, AP_REG_EXTENDED|USE_ICASE); + } else if (apr_fnmatch_test(directory)) { + d_is_fnmatch = 1; + } + } else if ((context = find_parent(directive, "args; + endp = ap_strrchr_c(arg, '>'); + arg = apr_pstrndup(p, arg, endp - arg); + + directory = ap_getword_conf(p, &arg); + regex = ap_pregcomp(p, directory, AP_REG_EXTENDED|USE_ICASE); + } + else if (cmd->config_file != NULL) { + /* cmd->config_file is NULL when in main Apache + * configuration file as the file is completely + * read in before the directive is processed as + * EXEC_ON_READ is not set in req_override field + * of command_struct table entry. Thus know then + * we are being used in a .htaccess file. */ + + directory = ap_make_dirstr_parent(p, directive->filename); + } + + /* Only canonicalize path and add trailing slash at + * this point if no pattern matching to be done at + * a later time. */ + + if (directory && !d_is_fnmatch && !regex && !d_is_location) { + + char *newpath = NULL; + apr_status_t rv; + + /* NB: if directory is not absolute, CWD will be prepended to + * it. 
If directory is NULL (we make sure it is not NULL + * above), then newpath will become CWD */ + rv = apr_filepath_merge(&newpath, NULL, directory, + APR_FILEPATH_TRUENAME, p); + + if (rv == APR_SUCCESS || rv == APR_EPATHWILD) { + /* + * We are appending a trailing slash here to be consistent + * with what httpd's core.c does in dirsection(). I [GT] + * am not very clear why core.c does it, especially given + * that it appends the trailing slash even to things that + * aren't really directories, e.g. when directory is an + * fnmatch pattern such as "/foo/bar*" + */ + directory = newpath; + if (directory[strlen(directory) - 1] != '/') { + directory = apr_pstrcat(p, directory, "/", NULL); + } + } + } + + *dp = directory; + *dx = d_is_fnmatch; + *dl = d_is_location; + *rx = regex; +} + +static void python_directive_hl_add(apr_pool_t *p, apr_hash_t *hlists, + const char *phase, const char *handler, + const cmd_parms* cmd, char *directory, + char d_is_fnmatch, char d_is_location, + ap_regex_t* regex, const char silent) +{ + hl_entry *head; + char *h; + + head = (hl_entry *)apr_hash_get(hlists, phase, APR_HASH_KEY_STRING); + + /* it's possible that handler is multiple handlers separated + by white space */ + + while (*(h = ap_getword_white(p, &handler)) != '\0') { + if (!head) { + head = hlist_new(p, h, directory, d_is_fnmatch, d_is_location, regex, silent); + apr_hash_set(hlists, phase, APR_HASH_KEY_STRING, head); + } + else { + hlist_append(p, head, h, directory, d_is_fnmatch, d_is_location, regex, silent); + } + } +} + +/** + ** python_directive_handler + ** + * Called by Python*Handler directives. + * + * When used within the same directory, this will have a + * cumulative, rather than overriding effect - i.e. values + * from same directives specified multiple times will be appended. 
+ * + */ + +static const char *python_directive_handler(cmd_parms *cmd, py_config* conf, + char *key, const char *val, int silent) +{ + + char *directory = NULL; + char d_is_fnmatch = 0; + char d_is_location = 0; + ap_regex_t *regex = NULL; + const char *exts; + + determine_context(cmd->pool, cmd, &directory, &d_is_fnmatch, &d_is_location, ®ex); + + /* d_is_location is used by the map_to_storage handler - there is no need for it to + run, if we are inside a Location. We also do not prepend a Location to sys.path. */ + conf->d_is_location = d_is_location; + + /* a handler may be restricted to certain file type by + * extention using the "| .ext1 .ext2" syntax. When this + * is the case, we will end up with a directive concatenated + * with the extension, one per, e.g. + * "PythonHandler foo | .ext1 .ext2" will result in + * PythonHandler.ext1 foo + * PythonHandler.ext2 foo + */ + + exts = val; + val = ap_getword(cmd->pool, &exts, '|'); + + if (*exts == '\0') { + python_directive_hl_add(cmd->pool, conf->hlists, key, val, + cmd, directory, d_is_fnmatch, d_is_location, regex, silent); + } + else { + + char *ext; + + /* skip blanks */ + while (apr_isspace(*exts)) exts++; + + /* repeat for every extension */ + while (*(ext = ap_getword_white(cmd->pool, &exts)) != '\0') { + char *s; + + /* append extention to the directive name */ + s = apr_pstrcat(cmd->pool, key, ext, NULL); + + python_directive_hl_add(cmd->pool, conf->hlists, s, val, + cmd, directory, d_is_fnmatch, d_is_location, regex, silent); + } + } + + return NULL; +} + +/** + ** python_directive_flag + ** + * Called for FLAG directives. 
+ * + */ + +static const char *python_directive_flag(void * mconfig, char *key, int val) +{ + py_config *conf; + + conf = (py_config *) mconfig; + + if (val) { + apr_table_set(conf->directives, key, "1"); + } + else { + apr_table_set(conf->directives, key, "0"); + } + + return NULL; +} + +static apr_status_t python_cleanup_handler(void *data); + +/** + ** python_get_request_object + ** + * This creates or retrieves a previously created request object. + * The pointer to request object is stored in req->request_config. + */ + +requestobject *python_get_request_object(request_rec *req, const char *phase) +{ + py_req_config *req_config; + requestobject *request_obj = NULL; + + /* see if there is a request object already */ + req_config = (py_req_config *) ap_get_module_config(req->request_config, + &python_module); + + if (req_config) { + request_obj = req_config->request_obj; + } + else { + + request_obj = (requestobject *)MpRequest_FromRequest(req); + if (!request_obj) return NULL; + + /* store the pointer to this object in request_config */ + req_config = apr_pcalloc(req->pool, sizeof(py_req_config)); + req_config->request_obj = request_obj; + req_config->dynhls = apr_hash_make(req->pool); + req_config->in_filters = apr_hash_make(req->pool); + req_config->out_filters = apr_hash_make(req->pool); + ap_set_module_config(req->request_config, &python_module, req_config); + + /* register the clean up directive handler */ + apr_pool_cleanup_register(req->pool, (void *)req, + python_cleanup_handler, + apr_pool_cleanup_null); + } + + /* make a note of which phase we are in right now */ + if (phase) + { + Py_XDECREF(request_obj->phase); + request_obj->phase = MpBytesOrUnicode_FromString(phase); + } + + return request_obj; +} + +/** + ** resolve_directory + ** + * resolve any directory match returning the matched directory + */ + +static const char *resolve_directory(request_rec *req, const char *directory, + char d_is_fnmatch, ap_regex_t *regex) +{ + char *prefix; + int len, 
dirs, i; + + if (!req || !req->filename || (!d_is_fnmatch && !regex)) + return directory; + + dirs = ap_count_dirs(req->filename) + 1; + len = strlen(req->filename); + prefix = (char*)apr_palloc(req->pool, len+1); + + for (i=0; i<=dirs; i++) { + ap_make_dirstr_prefix(prefix, req->filename, i); +#ifdef WIN32 + if (d_is_fnmatch && apr_fnmatch(directory, prefix, + APR_FNM_PATHNAME|APR_FNM_CASE_BLIND) == 0) { +#else + if (d_is_fnmatch && apr_fnmatch(directory, prefix, + APR_FNM_PATHNAME) == 0) { +#endif + return prefix; + } + else if (regex && ap_regexec(regex, prefix, 0, NULL, 0) == 0) { + return prefix; + } + + if (strcmp(prefix, "/") != 0) { + prefix[strlen(prefix)-1] = '\0'; +#ifdef WIN32 + if (d_is_fnmatch && apr_fnmatch(directory, prefix, + APR_FNM_PATHNAME|APR_FNM_CASE_BLIND) == 0) { +#else + if (d_is_fnmatch && apr_fnmatch(directory, prefix, + APR_FNM_PATHNAME) == 0) { +#endif + prefix[strlen(prefix)] = '/'; + return prefix; + } + else if (regex && ap_regexec(regex, prefix, 0, NULL, 0) == 0) { + prefix[strlen(prefix)] = '/'; + return prefix; + } + } + } + + return directory; +} + +/** + ** select_interp_name + ** + * (internal) + * figure out the name of the interpreter we should be using + * If this is for a handler, then hle is required. If this is + * for a filter, then fname and is_input are required. If con + * is specified, then its a connection handler. 
+ */ + +static const char *select_interp_name(request_rec *req, conn_rec *con, + py_config *conf, hl_entry *hle, + py_handler *fh) +{ + const char *s = NULL; + + if ((s = apr_table_get(conf->directives, "PythonInterpreter"))) { + /* forced by configuration */ + return s; + } + else { + if ((s = apr_table_get(conf->directives, "PythonInterpPerDirectory")) && (strcmp(s, "1") == 0)) { + + /* base interpreter on directory where the file is found */ + if (req && ap_is_directory(req->pool, req->filename)) { + /* + * We are appending a trailing slash here to be + * consistent with what httpd's core.c does in + * dirsection(), or else we will end up with a + * different interpreter named without the slash. + */ + if (req->filename[strlen(req->filename)-1]=='/') + return ap_make_dirstr_parent(req->pool, req->filename); + else + return ap_make_dirstr_parent(req->pool, + apr_pstrcat(req->pool, req->filename, + "/", NULL )); + } else { + if (req && req->filename) + return ap_make_dirstr_parent(req->pool, req->filename); + else + /* + * In early phases of the request, req->filename is not known, + * so this would have to run in the global interpreter. + */ + return NULL; + } + } + else if ((s = apr_table_get(conf->directives, "PythonInterpPerDirective")) && (strcmp(s, "1") == 0)) { + + /* + * base interpreter name on directory where the handler directive + * was last found. If it was in http.conf, then we will use the + * global interpreter. + */ + + if (fh) { + s = fh->directory; + } + else if (hle) { + s = hle->directory; + } + else { + return NULL; + } + + if (s && (s[0] == '\0')) + return NULL; + else + return s; + } + else { + /* - default: per server - */ + if (con) + return con->base_server->server_hostname; + else + return req->server->server_hostname; + } + } +} + +/** + ** python_handler + ** + * A generic python handler. Most handlers should use this. 
+ */ + +static int python_handler(request_rec *req, char *phase) +{ + + PyObject *resultobject = NULL; + interpreterdata *idata; + requestobject *request_obj; + py_config * conf; + int result; + const char *interp_name = NULL; + char *ext = NULL; + hl_entry *hle = NULL; + hl_entry *dynhle = NULL; + hl_entry *hlohle = NULL; + + py_req_config *req_conf; + + + + /* get configuration */ + conf = (py_config *) ap_get_module_config(req->per_dir_config, + &python_module); + /* get file extension */ + if (req->filename) { /* filename is null until after transhandler */ + /* get rid of preceeding path */ + if ((ext = (char *)ap_strrchr_c(req->filename, '/')) == NULL) + ext = req->filename; + else + ++ext; + /* get extension */ + ap_getword(req->pool, (const char **)&ext, '.'); + if (*ext != '\0') + ext = apr_pstrcat(req->pool, ".", ext, NULL); + } + + /* is there an hlist entry, i.e. a handler? */ + /* try with extension */ + if (ext) { + hle = (hl_entry *)apr_hash_get(conf->hlists, + apr_pstrcat(req->pool, phase, ext, NULL), + APR_HASH_KEY_STRING); + } + + /* try without extension if we don't match */ + if (!hle) { + hle = (hl_entry *)apr_hash_get(conf->hlists, phase, + APR_HASH_KEY_STRING); + + /* also blank out ext since we didn't succeed with it. this is tested + further below */ + ext = NULL; + } + + req_conf = (py_req_config *) ap_get_module_config(req->request_config, + &python_module); + if (req_conf) { + dynhle = (hl_entry *)apr_hash_get(req_conf->dynhls, phase, + APR_HASH_KEY_STRING); + } + + if (! (hle || dynhle)) { + /* nothing to do here */ + return DECLINED; + } + + /* construct list for the handler list object */ + if (!hle) { + hlohle = hlist_copy(req->pool, dynhle); + } + else { + hlohle = hlist_copy(req->pool, hle); + + if (dynhle) + hlist_extend(req->pool, hlohle, dynhle); + } + + /* resolve wildcard or regex directory patterns. 
we do not resolve + * Location patterns because there is no use case for it so far */ + hle = hlohle; + while (hle) { + if (!hle->d_is_location && (hle->d_is_fnmatch || hle->regex)) { + hle->directory = resolve_directory(req, hle->directory, + hle->d_is_fnmatch, hle->regex); + hle->d_is_fnmatch = 0; + hle->regex = NULL; + } + + hle = hle->next; + } + + /* determine interpreter to use */ + interp_name = select_interp_name(req, NULL, conf, hlohle, NULL); + + /* get/create interpreter */ + idata = get_interpreter(interp_name); + + if (!idata) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "python_handler: Can't get/create interpreter."); + return HTTP_INTERNAL_SERVER_ERROR; + } + + /* create/acquire request object */ + request_obj = python_get_request_object(req, phase); + + /* remember the extension if any. used by publisher */ + if (ext) + request_obj->extension = apr_pstrdup(req->pool, ext); + + /* construct a new handler list object */ + Py_XDECREF(request_obj->hlo); + request_obj->hlo = (hlistobject *)MpHList_FromHLEntry(hlohle); + + /* + * Here is where we call into Python! + * This is the C equivalent of + * >>> resultobject = obCallBack.HandlerDispatch(request_object) + */ + + resultobject = PyObject_CallMethod(idata->obcallback, "HandlerDispatch", + "O", request_obj); + + /* clear phase from request object */ + Py_XDECREF(request_obj->phase); + request_obj->phase = NULL; + + /* release the lock and destroy tstate */ + release_interpreter(idata); + + if (! resultobject) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "python_handler: (%s) HandlerDispatch() returned nothing.", phase); + return HTTP_INTERNAL_SERVER_ERROR; + } + else { + /* Attempt to analyze the result as a string indicating which + result to return */ +#if PY_MAJOR_VERSION < 3 + if (! PyInt_Check(resultobject)) { +#else + if (! 
PyLong_Check(resultobject)) { +#endif + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "python_handler: (%s) HandlerDispatch() returned non-integer.", phase); + return HTTP_INTERNAL_SERVER_ERROR; + } + else { +#if PY_MAJOR_VERSION < 3 + result = PyInt_AsLong(resultobject); +#else + result = PyLong_AsLong(resultobject); +#endif + /* authen handlers need one more thing + * if authentication failed and this handler is not + * authoritative, let the others handle it + */ + if (strcmp(phase, "PythonAuthenHandler") == 0) { + /* This is a prevention measure for what is likely a bug + in mod_auth.c whereby r->user is used even if null. + XXX Remove in the future + */ + if (result == OK && !req->user) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "python_handler: After PythonAuthenHandler req->user is NULL. " + "Assign something to req.user if returning OK to avoid this error."); + return HTTP_INTERNAL_SERVER_ERROR; + } + if (result == HTTP_UNAUTHORIZED) + { + if (! conf->authoritative) + result = DECLINED; + else { + /* + * This will insert a WWW-Authenticate header + * to let the browser know that we are using + * Basic authentication. This function does check + * to make sure that auth is indeed Basic, no + * need to do that here. + */ + ap_note_basic_auth_failure(req); + } + } + } + } + } + + /* When the script sets an error status by using req.status, + * it can then either provide its own HTML error message or have + * Apache provide one. To have Apache provide one, you need to send + * no output and return the error from the handler function. However, + * if the script is providing HTML, then the return value of the + * handler should be OK, else the user will get both the script + * output and the Apache output. + */ + + /* Another note on status. req->status is used to build req->status_line + * unless status_line is not NULL. req->status has no effect on how the + * server will behave. The error behaviour is dictated by the return + * value of this handler. 
When the handler returns anything other than OK, + * the server will display the error that matches req->status, unless it is + * 200 (HTTP_OK), in which case it will just show the error matching the return + * value. If the req->status and the return of the handle do not match, + * then the server will first show what req->status shows, then it will + * print "Additionally, X error was recieved", where X is the return code + * of the handle. If the req->status or return code is a weird number that the + * server doesn't know, it will default to 500 Internal Server Error. + */ + + /* clean up */ + Py_XDECREF(resultobject); + + /* return the translated result (or default result) to the Server. */ + return result; + +} + +/** + ** python_cleanup_handler + ** + * Runs handler registered via PythonCleanupHandler. Clean ups + * registered via register_cleanup() run in python_cleanup() above. + */ + +static apr_status_t python_cleanup_handler(void *data) +{ + + apr_status_t rc; + py_req_config *req_config; + request_rec *req = (request_rec *)data; + + rc = python_handler((request_rec *)data, "PythonCleanupHandler"); + + req_config = (py_req_config *) ap_get_module_config(req->request_config, + &python_module); + + if (req_config && req_config->request_obj) { + + interpreterdata *idata; + requestobject *request_obj = req_config->request_obj; + + /* get interpreter */ + idata = get_interpreter(NULL); + if (!idata) + return APR_SUCCESS; /* this return code is ignored by httpd anyway */ + + Py_XDECREF(request_obj); + + /* release interpreter */ + release_interpreter(idata); + } + + return rc; +} + + +/** + ** python_connection + ** + * connection handler + */ + +static apr_status_t python_connection(conn_rec *con) +{ + + PyObject *resultobject = NULL; + interpreterdata *idata; + connobject *conn_obj; + py_config * conf; + int result; + const char *interp_name = NULL; + hl_entry *hle = NULL; + + /* get configuration */ + conf = (py_config *) 
ap_get_module_config(con->base_server->module_config, + &python_module); + + /* is there a handler? */ + hle = (hl_entry *)apr_hash_get(conf->hlists, "PythonConnectionHandler", + APR_HASH_KEY_STRING); + + if (! hle) { + /* nothing to do here */ + return DECLINED; + } + + /* determine interpreter to use */ + interp_name = select_interp_name(NULL, con, conf, hle, NULL); + + /* get/create interpreter */ + idata = get_interpreter(interp_name); + + if (!idata) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, con->base_server, + "python_connection: Can't get/create interpreter."); + return HTTP_INTERNAL_SERVER_ERROR; + } + + /* create connection object */ + conn_obj = (connobject*) MpConn_FromConn(con); + + /* create a handler list object */ + conn_obj->hlo = (hlistobject *)MpHList_FromHLEntry(hle); + + /* + * Here is where we call into Python! + * This is the C equivalent of + * >>> resultobject = obCallBack.ConnectionDispatch(request_object) + */ + resultobject = PyObject_CallMethod(idata->obcallback, "ConnectionDispatch", + "O", conn_obj); + + /* release the lock and destroy tstate*/ + release_interpreter(idata); + + if (! resultobject) { + ap_log_error(APLOG_MARK, APLOG_ERR, 0, con->base_server, + "python_connection: ConnectionDispatch() returned nothing."); + return HTTP_INTERNAL_SERVER_ERROR; + } + else { + /* Attempt to analyze the result as a string indicating which + result to return */ +#if PY_MAJOR_VERSION < 3 + if (! PyInt_Check(resultobject)) { +#else + if (! PyLong_Check(resultobject)) { +#endif + ap_log_error(APLOG_MARK, APLOG_ERR, 0, con->base_server, + "python_connection: ConnectionDispatch() returned non-integer."); + return HTTP_INTERNAL_SERVER_ERROR; + } + else +#if PY_MAJOR_VERSION < 3 + result = PyInt_AsLong(resultobject); +#else + result = PyLong_AsLong(resultobject); +#endif + } + + /* clean up */ + Py_XDECREF(resultobject); + + /* return the translated result (or default result) to the Server. 
*/ + return result; +} + +/** + ** python_filter + ** + * filter + */ + +static apr_status_t python_filter(int is_input, ap_filter_t *f, + apr_bucket_brigade *bb, + ap_input_mode_t mode, + apr_read_type_e block, + apr_size_t readbytes) { + + PyObject *resultobject = NULL; + interpreterdata *idata; + requestobject *request_obj; + py_config * conf; + py_req_config * req_config; + const char * interp_name = NULL; + request_rec *req; + filterobject *filter; + python_filter_ctx *ctx; + py_handler *fh; + + /* we only allow request level filters so far */ + req = f->r; + + /* create ctx if not there yet */ + if (!f->ctx) { + ctx = (python_filter_ctx *) apr_pcalloc(req->pool, sizeof(python_filter_ctx)); + f->ctx = (void *)ctx; + } + else { + ctx = (python_filter_ctx *) f->ctx; + } + + /* are we in transparent mode? transparent mode is on after an error, + so a fitler can spit out an error without causing infinite loop */ + if (ctx->transparent) { + if (is_input) + return ap_get_brigade(f->next, bb, mode, block, readbytes); + else + return ap_pass_brigade(f->next, bb); + } + + /* get configuration */ + conf = (py_config *) ap_get_module_config(req->per_dir_config, + &python_module); + + req_config = (py_req_config *) ap_get_module_config(req->request_config, + &python_module); + /* the name of python function to call */ + if (ctx->name) { + if (is_input) + fh = apr_hash_get(req_config->in_filters, ctx->name, APR_HASH_KEY_STRING); + else + fh = apr_hash_get(req_config->out_filters, ctx->name, APR_HASH_KEY_STRING); + } else { + if (is_input) + fh = apr_hash_get(conf->in_filters, f->frec->name, APR_HASH_KEY_STRING); + else + fh = apr_hash_get(conf->out_filters, f->frec->name, APR_HASH_KEY_STRING); + } + + if (!fh) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "python_filter: Could not find registered filter."); + return HTTP_INTERNAL_SERVER_ERROR; + } + + /* determine interpreter to use */ + interp_name = select_interp_name(req, NULL, conf, NULL, fh); + + /* get/create 
interpreter */ + idata = get_interpreter(interp_name); + + if (!idata) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "python_filter: Can't get/create interpreter."); + return HTTP_INTERNAL_SERVER_ERROR; + } + + /* create/acquire request object */ + request_obj = python_get_request_object(req, 0); + + /* create filter */ + filter = (filterobject *)MpFilter_FromFilter(f, bb, is_input, mode, readbytes, + fh->handler, fh->directory); + + Py_INCREF(request_obj); + filter->request_obj = request_obj; + + /* + * Here is where we call into Python! + * This is the C equivalent of + * >>> resultobject = obCallBack.FilterDispatch(filter_object) + */ + resultobject = PyObject_CallMethod(idata->obcallback, "FilterDispatch", "O", + filter); + + /* clean up */ + Py_XDECREF(resultobject); + + /* release interpreter */ + release_interpreter(idata); + + return filter->rc; +} + +/** + ** python_input_filter + ** + * input filter + */ + +static apr_status_t python_input_filter(ap_filter_t *f, + apr_bucket_brigade *bb, + ap_input_mode_t mode, + apr_read_type_e block, + apr_off_t readbytes) +{ + return python_filter(1, f, bb, mode, block, readbytes); +} + + +/** + ** python_output_filter + ** + * output filter + */ + +static apr_status_t python_output_filter(ap_filter_t *f, + apr_bucket_brigade *bb) +{ + return python_filter(0, f, bb, 0, 0, 0); +} + + +/** + ** handle_python + ** + * handler function for mod_include tag + * + * The mod_include tag handler interface changed at: + * + * 20030821 (2.1.0-dev) bumped mod_include's entire API + * + * Provide a completely separate implementation for now until + * it is determined whether the new SSI_CREATE_ERROR_BUCKET + * macro can simply be copied to allow backward compatibility. 
+ */ + +#if AP_MODULE_MAGIC_AT_LEAST(20030821,0) + +static apr_status_t handle_python(include_ctx_t *ctx, + ap_filter_t *f, + apr_bucket_brigade *bb) { + + py_config *conf; + const char *interp_name = NULL; + interpreterdata *idata; + requestobject *request_obj; + PyObject *resultobject = NULL; + filterobject *filter; + apr_bucket *tmp_buck; + + char *file = f->r->filename; + char *tag = NULL; + char *tag_val = NULL; + + PyObject *tagobject = NULL; + PyObject *codeobject = NULL; + + request_rec *req = f->r; + + if (!(ctx->flags & SSI_FLAG_PRINTING)) { + return APR_SUCCESS; + } + + if (ctx->flags & SSI_FLAG_NO_EXEC) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "#python used but not allowed in %s", file); + + SSI_CREATE_ERROR_BUCKET(ctx, f, bb); + return APR_SUCCESS; + } + + /* process tags */ + while (1) { + optfn_ssi_get_tag_and_value(ctx, &tag, &tag_val, 1); + + if (!tag || !tag_val) + break; + + if (!strlen(tag_val)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "empty value for '%s' parameter to tag 'python' in %s", + tag, file); + + SSI_CREATE_ERROR_BUCKET(ctx, f, bb); + Py_XDECREF(tagobject); + Py_XDECREF(codeobject); + return APR_SUCCESS; + } + + if (!strcmp(tag, "eval") || !strcmp(tag, "exec")) { + if (tagobject) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "multiple 'eval/exec' parameters to tag 'python' in %s", + file); + + SSI_CREATE_ERROR_BUCKET(ctx, f, bb); + Py_XDECREF(tagobject); + Py_XDECREF(codeobject); + return APR_SUCCESS; + } + + tagobject = MpBytesOrUnicode_FromString(tag); + codeobject = MpBytesOrUnicode_FromString(tag_val); + } else { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "unexpected '%s' parameter to tag 'python' in %s", + tag, file); + + SSI_CREATE_ERROR_BUCKET(ctx, f, bb); + Py_XDECREF(tagobject); + Py_XDECREF(codeobject); + return APR_SUCCESS; + } + } + + if (!tagobject) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "missing 'eval/exec' parameter to tag 'python' in %s", + file); + + 
SSI_CREATE_ERROR_BUCKET(ctx, f, bb); + return APR_SUCCESS; + } + + /* get configuration */ + conf = (py_config *) ap_get_module_config(req->per_dir_config, + &python_module); + + /* determine interpreter to use */ + interp_name = select_interp_name(req, NULL, conf, NULL, NULL); + + /* get/create interpreter */ + idata = get_interpreter(interp_name); + + if (!idata) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "handle_python: Can't get/create interpreter."); + + Py_XDECREF(tagobject); + Py_XDECREF(codeobject); + return HTTP_INTERNAL_SERVER_ERROR; + } + + /* create/acquire request object */ + request_obj = python_get_request_object(req, 0); + + /* create filter */ + filter = (filterobject *)MpFilter_FromFilter(f, bb, 0, 0, 0, 0, 0); + + Py_INCREF(request_obj); + filter->request_obj = request_obj; + + /* + * Here is where we call into Python! + */ + resultobject = PyObject_CallMethod(idata->obcallback, + "IncludeDispatch", "OOO", filter, tagobject, codeobject); + + if (!resultobject) + { + SSI_CREATE_ERROR_BUCKET(ctx, f, bb); + release_interpreter(idata); + return APR_SUCCESS; + } + + /* clean up */ + Py_XDECREF(resultobject); + + /* release interpreter */ + release_interpreter(idata); + + return filter->rc; +} + +#else + +static apr_status_t handle_python(include_ctx_t *ctx, + apr_bucket_brigade **bb, + request_rec *r_bogus, + ap_filter_t *f, + apr_bucket *head_ptr, + apr_bucket **inserted_head) { + + py_config *conf; + const char *interp_name = NULL; + interpreterdata *idata; + requestobject *request_obj; + PyObject *resultobject = NULL; + filterobject *filter; + apr_bucket *tmp_buck; + + char *file = f->r->filename; + char *tag = NULL; + char *tag_val = NULL; + + PyObject *tagobject = NULL; + PyObject *codeobject = NULL; + + request_rec *req = f->r; + + if (!(ctx->flags & FLAG_PRINTING)) { + return APR_SUCCESS; + } + + if (ctx->flags & FLAG_NO_EXEC) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "#python used but not allowed in %s", file); + + 
CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head); + return APR_SUCCESS; + } + + /* process tags */ + while (1) { + optfn_ssi_get_tag_and_value(ctx, &tag, &tag_val, 1); + + if (!tag || !tag_val) + break; + + if (!strlen(tag_val)) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "empty value for '%s' parameter to tag 'python' in %s", + tag, file); + + CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head); + Py_XDECREF(tagobject); + Py_XDECREF(codeobject); + return APR_SUCCESS; + } + + if (!strcmp(tag, "eval") || !strcmp(tag, "exec")) { + if (tagobject) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "multiple 'eval/exec' parameters to tag 'python' in %s", + file); + + CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head); + Py_XDECREF(tagobject); + Py_XDECREF(codeobject); + return APR_SUCCESS; + } + + tagobject = MpBytesOrUnicode_FromString(tag); + codeobject = MpBytesOrUnicode_FromString(tag_val); + } else { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "unexpected '%s' parameter to tag 'python' in %s", + tag, file); + + CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head); + Py_XDECREF(tagobject); + Py_XDECREF(codeobject); + return APR_SUCCESS; + } + } + + if (!tagobject) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "missing 'eval/exec' parameter to tag 'python' in %s", + file); + + CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head); + return APR_SUCCESS; + } + + /* get configuration */ + conf = (py_config *) ap_get_module_config(req->per_dir_config, + &python_module); + + /* determine interpreter to use */ + interp_name = select_interp_name(req, NULL, conf, NULL, NULL); + + /* get/create interpreter */ + idata = get_interpreter(interp_name); + + if (!idata) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req, + "handle_python: Can't get/create interpreter."); + + Py_XDECREF(tagobject); + Py_XDECREF(codeobject); + return HTTP_INTERNAL_SERVER_ERROR; + } + + /* create/acquire request object */ + request_obj = 
python_get_request_object(req, 0); + + /* create filter */ + filter = (filterobject *)MpFilter_FromFilter(f, *bb, 0, 0, 0, 0, 0); + + Py_INCREF(request_obj); + filter->request_obj = request_obj; + + /* + * Here is where we call into Python! + */ + resultobject = PyObject_CallMethod(idata->obcallback, + "IncludeDispatch", "OOO", filter, tagobject, codeobject); + + if (!resultobject) + { + CREATE_ERROR_BUCKET(ctx, tmp_buck, head_ptr, *inserted_head); + release_interpreter(idata); + return APR_SUCCESS; + } + + /* clean up */ + Py_XDECREF(resultobject); + + /* release interpreter */ + release_interpreter(idata); + + return filter->rc; +} + +#endif + +/** + ** directive_PythonImport + ** + * This function called whenever PythonImport directive + * is encountered. Note that this function does not actually + * import anything, it just remembers what needs to be imported. + * The actual importing is done later + * in the ChildInitHandler. This is because this function here + * is called before the python_init and before the suid and fork. + * + */ +static const char *directive_PythonImport(cmd_parms *cmd, void *mconfig, + const char *module, const char *interp_name) +{ + py_config *conf = ap_get_module_config(cmd->server->module_config, + &python_module); + + if (!conf->imports) + conf->imports = apr_table_make(cmd->pool, 4); + + apr_table_add(conf->imports, interp_name, module); + + return NULL; + +} + +/** + ** directive_PythonPath + ** + * This function called whenever PythonPath directive + * is encountered. 
+ */ +static const char *directive_PythonPath(cmd_parms *cmd, void *mconfig, + const char *val) { + + const char *rc = python_directive(cmd, mconfig, "PythonPath", val); + + if (!cmd->path) { + py_config *conf = ap_get_module_config(cmd->server->module_config, + &python_module); + return python_directive(cmd, conf, "PythonPath", val); + } + return rc; +} + +/** + ** directive_PythonInterpreter + ** + * This function called whenever PythonInterpreter directive + * is encountered. + */ +static const char *directive_PythonInterpreter(cmd_parms *cmd, void *mconfig, + const char *val) { + const char *rc = python_directive(cmd, mconfig, "PythonInterpreter", val); + + if (!cmd->path) { + py_config *conf = ap_get_module_config(cmd->server->module_config, + &python_module); + return python_directive(cmd, conf, "PythonInterpreter", val); + } + return rc; +} + +/** + ** directive_PythonDebug + ** + * This function called whenever PythonDebug directive + * is encountered. + */ +static const char *directive_PythonDebug(cmd_parms *cmd, void *mconfig, + int val) { + const char *rc = python_directive_flag(mconfig, "PythonDebug", val); + + if (!cmd->path) { + py_config *conf = ap_get_module_config(cmd->server->module_config, + &python_module); + + return python_directive_flag(conf, "PythonDebug", val); + } + return rc; +} + +/** + ** directive_PythonEnablePdb + ** + * This function called whenever PythonEnablePdb directive + * is encountered. + */ +static const char *directive_PythonEnablePdb(cmd_parms *cmd, void *mconfig, + int val) { + const char *rc = python_directive_flag(mconfig, "PythonEnablePdb", val); + + if (!cmd->path) { + py_config *conf = ap_get_module_config(cmd->server->module_config, + &python_module); + return python_directive_flag(conf, "PythonEnablePdb", val); + } + return rc; +} + +/** + ** directive_PythonInterpPerDirective + ** + * This function called whenever PythonInterpPerDirective directive + * is encountered. 
 */

static const char *directive_PythonInterpPerDirective(cmd_parms *cmd,
                                                      void *mconfig, int val) {
    const char *rc = python_directive_flag(mconfig, "PythonInterpPerDirective", val);

    /* fall through to the server config outside a directory context */
    if (!cmd->path) {
        py_config *conf = ap_get_module_config(cmd->server->module_config,
                                               &python_module);
        return python_directive_flag(conf, "PythonInterpPerDirective", val);
    }
    return rc;
}

/**
 ** directive_PythonInterpPerDirectory
 **
 *      This function is called whenever the PythonInterpPerDirectory directive
 *      is encountered.
 */

static const char *directive_PythonInterpPerDirectory(cmd_parms *cmd,
                                                      void *mconfig, int val) {
    /* NOTE(review): unlike the sibling directives above, this one has no
       server-level fallback when cmd->path is NULL — confirm intentional. */
    return python_directive_flag(mconfig, "PythonInterpPerDirectory", val);
}

/**
 ** directive_PythonAutoReload
 **
 *      This function is called whenever the PythonAutoReload directive
 *      is encountered.
 */

static const char *directive_PythonAutoReload(cmd_parms *cmd,
                                              void *mconfig, int val) {
    const char *rc = python_directive_flag(mconfig, "PythonAutoReload", val);

    /* fall through to the server config outside a directory context */
    if (!cmd->path) {
        py_config *conf = ap_get_module_config(cmd->server->module_config,
                                               &python_module);
        return python_directive_flag(conf, "PythonAutoReload", val);
    }
    return rc;
}

/**
 ** directive_PythonOption
 **
 *      This function is called every time the PythonOption directive
 *      is encountered. It sticks the option into a table containing
 *      a list of options. This table is part of the local config structure.
 */

static const char *directive_PythonOption(cmd_parms *cmd, void * mconfig,
                                          const char *key, const char *val)
{
    py_config *conf;

    conf = (py_config *) mconfig;

    if(val!=NULL) {
        apr_table_set(conf->options, key, val);

        if (!cmd->path) {
            conf = ap_get_module_config(cmd->server->module_config,
                                        &python_module);
            apr_table_set(conf->options, key, val);
        }
    }
    else {
        /** We don't remove the value, but set it
            to an empty string. There is no possibility
            of colliding with an actual value, since
            an empty string precisely means 'remove the value' */
        apr_table_set(conf->options, key, "");

        if (!cmd->path) {
            conf = ap_get_module_config(cmd->server->module_config,
                                        &python_module);
            apr_table_set(conf->options, key, "");
        }
    }

    return NULL;
}

/**
 ** directive_PythonOptimize
 **
 *      This function is called whenever the PythonOptimize directive
 *      is encountered.
 */

static const char *directive_PythonOptimize(cmd_parms *cmd, void *mconfig,
                                            int val) {
    /* One-way, interpreter-global switch: once set it is never turned
       back off (cmd and mconfig are unused). */
    if ((val) && (Py_OptimizeFlag != 2))
        Py_OptimizeFlag = 2;
    return NULL;
}

/**
 ** Python*Handler directives
 **
 *      Thin wrappers that record each handler directive via
 *      python_directive_handler().
 */

static const char *directive_PythonAccessHandler(cmd_parms *cmd, void *mconfig,
                                                 const char *val) {
    return python_directive_handler(cmd, mconfig, "PythonAccessHandler", val, NOTSILENT);
}
static const char *directive_PythonAuthenHandler(cmd_parms *cmd, void *mconfig,
                                                 const char *val) {
    return python_directive_handler(cmd, mconfig, "PythonAuthenHandler", val, NOTSILENT);
}
static const char *directive_PythonAuthzHandler(cmd_parms *cmd, void *mconfig,
                                                const char *val) {
    return python_directive_handler(cmd, mconfig, "PythonAuthzHandler", val, NOTSILENT);
}
static const char *directive_PythonCleanupHandler(cmd_parms *cmd, void *mconfig,
                                                  const char *val) {
    return python_directive_handler(cmd, mconfig, "PythonCleanupHandler", val, NOTSILENT);
}
static const char *directive_PythonConnectionHandler(cmd_parms *cmd, void *mconfig,
                                                     const char *val) {
    /* Connection handlers are recorded against the server config, not the
       per-directory one passed in mconfig. */
    py_config *conf = ap_get_module_config(cmd->server->module_config,
                                           &python_module);
    return python_directive_handler(cmd, conf, "PythonConnectionHandler", val, NOTSILENT);
}
static const char *directive_PythonFixupHandler(cmd_parms *cmd, void *mconfig,
                                                const char *val) {
    return python_directive_handler(cmd, mconfig, "PythonFixupHandler", val, NOTSILENT);
}
static const char *directive_PythonHandler(cmd_parms *cmd, void
*mconfig,
                                           const char *val) {
    return python_directive_handler(cmd, mconfig, "PythonHandler", val, NOTSILENT);
}
static const char *directive_PythonHeaderParserHandler(cmd_parms *cmd, void *mconfig,
                                                       const char *val) {
    return python_directive_handler(cmd, mconfig, "PythonHeaderParserHandler", val, NOTSILENT);
}
static const char *directive_PythonInitHandler(cmd_parms *cmd, void *mconfig,
                                               const char *val) {
    return python_directive_handler(cmd, mconfig, "PythonInitHandler", val, NOTSILENT);
}
static const char *directive_PythonHandlerModule(cmd_parms *cmd, void *mconfig,
                                                 const char *val) {

    /*
     * This handler explodes into all other handlers, but their absence will be
     * silently ignored.
     */

    /*
     * XXX Not used at present. See problems noted against connection
     * handler below.
     *
    py_config *srv_conf = ap_get_module_config(cmd->server->module_config,
                                               &python_module);
    */

    python_directive_handler(cmd, mconfig, "PythonPostReadRequestHandler", val, SILENT);
    python_directive_handler(cmd, mconfig, "PythonTransHandler", val, SILENT);
    python_directive_handler(cmd, mconfig, "PythonHeaderParserHandler", val, SILENT);
    python_directive_handler(cmd, mconfig, "PythonAccessHandler", val, SILENT);
    python_directive_handler(cmd, mconfig, "PythonAuthenHandler", val, SILENT);
    python_directive_handler(cmd, mconfig, "PythonAuthzHandler", val, SILENT);
    python_directive_handler(cmd, mconfig, "PythonTypeHandler", val, SILENT);
    python_directive_handler(cmd, mconfig, "PythonFixupHandler", val, SILENT);
    python_directive_handler(cmd, mconfig, "PythonHandler", val, SILENT);
    python_directive_handler(cmd, mconfig, "PythonInitHandler", val, SILENT);
    python_directive_handler(cmd, mconfig, "PythonLogHandler", val, SILENT);
    python_directive_handler(cmd, mconfig, "PythonCleanupHandler", val, SILENT);

    /*
     * XXX There is a bug here with PythonConnectionHandler which can
     * cause an infinite loop when the handler is added to the handler
     * list. Cause is unknown so simply disable it for now. If someone
     * really needs a connection handler, they can use the directive
     * PythonConnectionHandler explicitly still and not rely on the
     * PythonHandlerModule directive doing it automatically.
     *
    python_directive_handler(cmd, srv_conf, "PythonConnectionHandler", val, SILENT);
    */

    return NULL;
}
static const char *directive_PythonPostReadRequestHandler(cmd_parms *cmd,
                                                          void * mconfig,
                                                          const char *val) {

    /* This phase runs before per-directory config is applied, so the
       extension-qualified "| .ext" syntax is rejected up front. */
    if (strchr((char *)val, '|'))
        return "PythonPostReadRequestHandler does not accept \"| .ext\" syntax.";

    return python_directive_handler(cmd, mconfig, "PythonPostReadRequestHandler", val,NOTSILENT);
}

static const char *directive_PythonTransHandler(cmd_parms *cmd, void *mconfig,
                                                const char *val) {
    /* Same restriction as PostReadRequest: no "| .ext" syntax here. */
    if (strchr((char *)val, '|'))
        return "PythonTransHandler does not accept \"| .ext\" syntax.";

    return python_directive_handler(cmd, mconfig, "PythonTransHandler", val, NOTSILENT);
}
static const char *directive_PythonTypeHandler(cmd_parms *cmd, void *mconfig,
                                               const char *val) {
    return python_directive_handler(cmd, mconfig, "PythonTypeHandler", val, NOTSILENT);
}
static const char *directive_PythonLogHandler(cmd_parms *cmd, void *mconfig,
                                              const char *val) {
    return python_directive_handler(cmd, mconfig, "PythonLogHandler", val, NOTSILENT);
}
static const char *directive_PythonInputFilter(cmd_parms *cmd, void *mconfig,
                                               const char *handler, const char *name) {

    py_config *conf;
    py_handler *fh;
    ap_filter_rec_t *frec;

    char *directory = NULL;
    char d_is_fnmatch = 0;
    char d_is_location = 0;
    ap_regex_t *regex = NULL;

    /* the filter name defaults to the handler name */
    if (!name)
        name = apr_pstrdup(cmd->pool, handler);

    /* register the filter NOTE - this only works so long as the
       directive is only allowed in the main config. For .htaccess we
       would have to make sure not to duplicate this */
    frec = ap_register_input_filter(name, python_input_filter, NULL, AP_FTYPE_RESOURCE);

    conf = (py_config *) mconfig;

    determine_context(cmd->pool, cmd, &directory, &d_is_fnmatch, &d_is_location, &regex);

    fh = (py_handler *) apr_pcalloc(cmd->pool, sizeof(py_handler));
    fh->handler = (char *)handler;
    fh->directory = directory;
    fh->d_is_fnmatch = d_is_fnmatch;
    fh->d_is_location = d_is_location;
    fh->regex = regex;

    apr_hash_set(conf->in_filters, frec->name, APR_HASH_KEY_STRING, fh);

    return NULL;
}

static const char *directive_PythonOutputFilter(cmd_parms *cmd, void *mconfig,
                                                const char *handler, const char *name) {
    py_config *conf;
    py_handler *fh;
    ap_filter_rec_t *frec;

    char *directory = NULL;
    char d_is_fnmatch = 0;
    char d_is_location = 0;
    ap_regex_t *regex = NULL;

    /* the filter name defaults to the handler name */
    if (!name)
        name = apr_pstrdup(cmd->pool, handler);

    /* register the filter NOTE - this only works so long as the
       directive is only allowed in the main config.
For .htaccess we
       would have to make sure not to duplicate this */
    frec = ap_register_output_filter(name, python_output_filter, NULL, AP_FTYPE_RESOURCE);

    determine_context(cmd->pool, cmd, &directory, &d_is_fnmatch, &d_is_location, &regex);

    conf = (py_config *) mconfig;

    fh = (py_handler *) apr_pcalloc(cmd->pool, sizeof(py_handler));
    fh->handler = (char *)handler;
    fh->directory = directory;
    fh->d_is_fnmatch = d_is_fnmatch;
    fh->d_is_location = d_is_location;
    fh->regex = regex;

    apr_hash_set(conf->out_filters, frec->name, APR_HASH_KEY_STRING, fh);

    return NULL;
}

/**
 ** python_finalize
 **
 *      We create a thread state just so we can run Py_Finalize()
 */

static apr_status_t python_finalize(void *data)
{
    interpreterdata *idata;

    idata = get_interpreter(NULL);

    if (idata) {
        Py_Finalize();
        /* NOTE(review): PyEval_SaveThread() is called after Py_Finalize();
           the Python C API does not define behavior for thread-state calls
           once the interpreter is finalized — confirm against upstream. */
        PyEval_SaveThread();
    }

    return APR_SUCCESS;
}

/**
 ** Handlers
 **
 */

static void PythonChildInitHandler(apr_pool_t *p, server_rec *s)
{

    const apr_array_header_t *ah;
    apr_table_entry_t *elts;
    int i;
    interpreterdata *idata;

    py_config *conf = ap_get_module_config(s->module_config, &python_module);
    py_global_config *glb;
    PyObject *resultobject = NULL;

    /* according to the Python C docs we must do this after forking */
    PyEval_RestoreThread(global_tstate);
    PyOS_AfterFork();

    idata = save_interpreter(MAIN_INTERPRETER, PyThreadState_Get());
    if (!idata)
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, main_server,
                     "PythonChildInitHandler: save_interpreter() returned NULL. No more memory?");

    if (PyEval_SaveThread() != global_tstate) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, main_server,
                     "PythonChildInitHandler: not in global thread state, aborting.");
        return;
    }

    /*
     * Cleanups registered first will be called last. This will
     * end the Python interpreter *after* all other cleanups.
     */

    /*
     * XXX Trying to cleanup Python on process shutdown causes
     * problems. This seems to mainly be an issue where there
     * are user created threads which are running in parallel as
     * the environment they are running in will be destroyed
     * from underneath them potentially resulting in crashes,
     * process hangs or simply Python errors. There is also a
     * small chance that finalization code can be called within
     * a signal handler in some configurations which could cause
     * problems as well. Thus disable cleanup of Python when
     * child processes are being shutdown. (MODPYTHON-109)
     *
    apr_pool_cleanup_register(p, NULL, python_finalize, apr_pool_cleanup_null);
    */

    /*
     * Reinit mutexes
     */

    /* this will return it if it already exists */
    glb = python_create_global_config(s);

    reinit_mutexes(s, p, glb);

    /*
     * remember the pool in a global var. we may use it
     * later in server.register_cleanup()
     */
    child_init_pool = p;

    /* register python handler for mod_include. could probably
     * also have done this in python_init() instead */
    optfn_register_include_handler = APR_RETRIEVE_OPTIONAL_FN(ap_register_include_handler);
    optfn_ssi_get_tag_and_value = APR_RETRIEVE_OPTIONAL_FN(ap_ssi_get_tag_and_value);
    optfn_ssi_parse_string = APR_RETRIEVE_OPTIONAL_FN(ap_ssi_parse_string);
    if (optfn_register_include_handler && optfn_ssi_get_tag_and_value &&
        optfn_ssi_parse_string) {
        optfn_register_include_handler("python", handle_python);
    }

    /*
     * Now run PythonImports
     */

    if (conf->imports) {
        ah = apr_table_elts (conf->imports);
        elts = (apr_table_entry_t *) ah->elts;

        i = ah->nelts;

        /* iterate the import table in reverse order */
        while (i--) {
            if (elts[i].key) {

                /* NOTE(review): this inner declaration shadows the outer
                   idata holding the main interpreter's state. */
                interpreterdata *idata;
                const char *interp_name = elts[i].key;
                const char *module_name = elts[i].val;

                /* get interpreter */
                idata = get_interpreter(interp_name);
                if (!idata)
                    return;

                /*
                 * Call into Python to do import.
                 * This is the C equivalent of
                 * >>> resultobject = obCallBack.ImportDispatch(module_name)
                 */
                resultobject = PyObject_CallMethod(idata->obcallback,
                                                   "ImportDispatch", "s", module_name);

                if (!resultobject) {
                    if (PyErr_Occurred()) {
                        PyErr_Print();
                        fflush(stderr);
                    }
                    /* NOTE(review): the fallback literal below is an empty
                       string in this copy; upstream logs a placeholder —
                       this text may have been mangled, confirm. */
                    ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                                 "directive_PythonImport: error importing %s",
                                 (!module_name) ? "" : module_name);
                }

                /* clean up */
                Py_XDECREF(resultobject);

                /* release interpreter */
                release_interpreter(idata);
            }
        }
    }
}

static int PythonConnectionHandler(conn_rec *con) {
    return python_connection(con);
}
static int PythonAccessHandler(request_rec *req) {
    return python_handler(req, "PythonAccessHandler");
}
static int PythonAuthenHandler(request_rec *req) {
    return python_handler(req, "PythonAuthenHandler");
}
static int PythonAuthzHandler(request_rec *req) {
    return python_handler(req, "PythonAuthzHandler");
}
static int PythonFixupHandler(request_rec *req) {
    return python_handler(req, "PythonFixupHandler");
}
static int PythonHandler(request_rec *req) {
    /*
     * In Apache 2.0, all handlers receive a request and have
     * a chance to process them. Therefore, we need to only
     * handle those that we explicitly agreed to handle (see
     * above).
     */
    if (!req->handler || (strcmp(req->handler, "mod_python") &&
                          strcmp(req->handler, "python-program")))
        return DECLINED;

    return python_handler(req, "PythonHandler");
}
static int PythonHeaderParserHandler(request_rec *req) {
    int rc;

    /* run PythonInitHandler, if not already */
    if (!
apr_table_get(req->notes, "python_init_ran")) {
        rc = python_handler(req, "PythonInitHandler");
        if ((rc != OK) && (rc != DECLINED))
            return rc;
    }
    return python_handler(req, "PythonHeaderParserHandler");
}
static int PythonLogHandler(request_rec *req) {
    return python_handler(req, "PythonLogHandler");
}
static int PythonPostReadRequestHandler(request_rec *req) {
    int rc;

    /* run PythonInitHandler; the note lets PythonHeaderParserHandler
       know not to run it a second time */
    rc = python_handler(req, "PythonInitHandler");
    apr_table_set(req->notes, "python_init_ran", "1");
    if ((rc != OK) && (rc != DECLINED))
        return rc;

    return python_handler(req, "PythonPostReadRequestHandler");
}
static int PythonTransHandler(request_rec *req) {
    return python_handler(req, "PythonTransHandler");
}
static int PythonMapToStorageHandler(request_rec *req) {
    py_config *conf = (py_config *) ap_get_module_config(req->per_dir_config,
                                                         &python_module);
    if (conf->d_is_location) {
        /* there is no storage, no need to hit the slow filesystem to
         * map to storage */
        return OK;
    }
    return DECLINED;
}
static int PythonTypeHandler(request_rec *req) {
    return python_handler(req, "PythonTypeHandler");
}

static void python_register_hooks(apr_pool_t *p)
{

    /* module initializer */
    ap_hook_post_config(python_init,
                        NULL, NULL, APR_HOOK_MIDDLE);

    /* [0] raw connection handling */
    ap_hook_process_connection(PythonConnectionHandler,
                               NULL, NULL, APR_HOOK_MIDDLE);

    /* [1] post read_request handling */
    ap_hook_post_read_request(PythonPostReadRequestHandler,
                              NULL, NULL, APR_HOOK_MIDDLE);

    /* [2] filename-to-URI translation */
    ap_hook_translate_name(PythonTransHandler,
                           NULL, NULL, APR_HOOK_MIDDLE);

    /* [2.5] storage mapping (TODO add me as a valid handler?) */
    /* NOTE(review): registered via ap_hook_translate_name rather than
       ap_hook_map_to_storage — confirm this matches upstream intent. */
    ap_hook_translate_name(PythonMapToStorageHandler,
                           NULL, NULL, APR_HOOK_MIDDLE);

    /* [3] header parser */
    ap_hook_header_parser(PythonHeaderParserHandler,
                          NULL, NULL, APR_HOOK_MIDDLE);

    /* [4] check access by host address */
    ap_hook_access_checker(PythonAccessHandler,
                           NULL, NULL, APR_HOOK_MIDDLE);

    /* [5] check/validate user_id */
    ap_hook_check_user_id(PythonAuthenHandler,
                          NULL, NULL, APR_HOOK_MIDDLE);

    /* [6] check user_id is valid *here* */
    ap_hook_auth_checker(PythonAuthzHandler,
                         NULL, NULL, APR_HOOK_MIDDLE);

    /* [7] MIME type checker/setter */
    ap_hook_type_checker(PythonTypeHandler,
                         NULL, NULL, APR_HOOK_MIDDLE);

    /* [8] fixups */
    ap_hook_fixups(PythonFixupHandler,
                   NULL, NULL, APR_HOOK_MIDDLE);

    /* [9] filter insert opportunity */
    /* ap_hook_insert_filter(PythonInsertFilter,
       NULL, NULL, APR_HOOK_MIDDLE); */

    /* [10] is for the handlers; see below */
    ap_hook_handler(PythonHandler, NULL, NULL, APR_HOOK_MIDDLE);

    /* [11] logger */
    ap_hook_log_transaction(PythonLogHandler,
                            NULL, NULL, APR_HOOK_MIDDLE);

    /* dynamic input/output filter entry points */
    ap_register_input_filter(FILTER_NAME, python_input_filter, NULL,
                             AP_FTYPE_RESOURCE);
    ap_register_output_filter(FILTER_NAME, python_output_filter, NULL,
                              AP_FTYPE_RESOURCE);

    /* process initializer */
    ap_hook_child_init(PythonChildInitHandler,
                       NULL, NULL, APR_HOOK_MIDDLE);

}

/* command table */
command_rec python_commands[] =
{
    AP_INIT_RAW_ARGS(
        "PythonAccessHandler", directive_PythonAccessHandler, NULL, OR_ALL,
        "Python access by host address handlers."),
    AP_INIT_RAW_ARGS(
        "PythonAuthenHandler", directive_PythonAuthenHandler, NULL, OR_ALL,
        "Python authentication handlers."),
    AP_INIT_FLAG(
        "PythonAutoReload", directive_PythonAutoReload, NULL, OR_ALL,
        "Set to Off if you don't want changed modules to reload."),
    AP_INIT_RAW_ARGS(
        "PythonAuthzHandler", directive_PythonAuthzHandler, NULL, OR_ALL,
        "Python
authorization handlers."),
    AP_INIT_RAW_ARGS(
        "PythonCleanupHandler", directive_PythonCleanupHandler, NULL, OR_ALL,
        "Python clean up handlers."),
    AP_INIT_RAW_ARGS(
        "PythonConnectionHandler", directive_PythonConnectionHandler, NULL, RSRC_CONF,
        "Python connection handlers."),
    AP_INIT_FLAG(
        "PythonDebug", directive_PythonDebug, NULL, OR_ALL,
        "Send (most) Python error output to the client rather than logfile."),
    AP_INIT_FLAG(
        "PythonEnablePdb", directive_PythonEnablePdb, NULL, OR_ALL,
        "Run handlers in PDB (Python Debugger). Use with -DONE_PROCESS."),
    AP_INIT_RAW_ARGS(
        "PythonFixupHandler", directive_PythonFixupHandler, NULL, OR_ALL,
        "Python fixups handlers."),
    AP_INIT_RAW_ARGS(
        "PythonHandler", directive_PythonHandler, NULL, OR_ALL,
        "Python request handlers."),
    AP_INIT_RAW_ARGS(
        "PythonHeaderParserHandler", directive_PythonHeaderParserHandler, NULL, OR_ALL,
        "Python header parser handlers."),
    AP_INIT_TAKE2(
        "PythonImport", directive_PythonImport, NULL, RSRC_CONF,
        "Module and interpreter name to be imported at server/child init time."),
    AP_INIT_RAW_ARGS(
        "PythonInitHandler", directive_PythonInitHandler, NULL, OR_ALL,
        "Python request initialization handler."),
    AP_INIT_FLAG(
        "PythonInterpPerDirective", directive_PythonInterpPerDirective, NULL, OR_ALL,
        "Create subinterpreters per directive."),
    AP_INIT_FLAG(
        "PythonInterpPerDirectory", directive_PythonInterpPerDirectory, NULL, OR_ALL,
        "Create subinterpreters per directory."),
    AP_INIT_TAKE1(
        "PythonInterpreter", directive_PythonInterpreter, NULL, OR_ALL,
        "Forces a specific Python interpreter name to be used here."),
    AP_INIT_RAW_ARGS(
        "PythonLogHandler", directive_PythonLogHandler, NULL, OR_ALL,
        "Python logger handlers."),
    AP_INIT_RAW_ARGS(
        "PythonHandlerModule", directive_PythonHandlerModule, NULL, OR_ALL,
        "A Python module containing handlers to be executed."),
    AP_INIT_FLAG(
        "PythonOptimize", directive_PythonOptimize, NULL, RSRC_CONF,
        "Set the equivalent of the -O command-line flag on the interpreter."),
    AP_INIT_TAKE12(
        "PythonOption", directive_PythonOption, NULL, OR_ALL,
        "Useful to pass custom configuration information to scripts."),
    AP_INIT_TAKE1(
        "PythonPath", directive_PythonPath, NULL, OR_ALL,
        "Python path, specified in Python list syntax."),
    AP_INIT_RAW_ARGS(
        "PythonPostReadRequestHandler", directive_PythonPostReadRequestHandler,
        NULL, RSRC_CONF,
        "Python post read-request handlers."),
    AP_INIT_RAW_ARGS(
        "PythonTransHandler", directive_PythonTransHandler, NULL, RSRC_CONF,
        "Python filename to URI translation handlers."),
    AP_INIT_RAW_ARGS(
        "PythonTypeHandler", directive_PythonTypeHandler, NULL, OR_ALL,
        "Python MIME type checker/setter handlers."),
    AP_INIT_TAKE12(
        "PythonInputFilter", directive_PythonInputFilter, NULL, RSRC_CONF|ACCESS_CONF,
        "Python input filter."),
    AP_INIT_TAKE12(
        "PythonOutputFilter", directive_PythonOutputFilter, NULL, RSRC_CONF|ACCESS_CONF,
        "Python output filter."),
    {NULL}
};


/* Module dispatch record: wires config creation/merging, the command
   table and hook registration into Apache. */
module python_module =
{
    STANDARD20_MODULE_STUFF,
    python_create_dir_config,      /* per-directory config creator */
    python_merge_config,           /* dir config merger */
    python_create_srv_config,      /* server config creator */
    python_merge_config,           /* server config merger */
    python_commands,               /* command table */
    python_register_hooks          /* register hooks */
};
diff --git a/src/mod_python.sln b/src/mod_python.sln
new file mode 100644
index 0000000..7aa324c
--- /dev/null
+++ b/src/mod_python.sln
@@ -0,0 +1,17 @@

Microsoft Visual Studio Solution File, Format Version 11.00
# Visual Studio 2010
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mod_python", "mod_python.vcxproj", "{91935325-716F-4776-825B-7A2250320FC1}"
EndProject
Global
	GlobalSection(SolutionConfigurationPlatforms) = preSolution
		Release|Win32 = Release|Win32
	EndGlobalSection
	GlobalSection(ProjectConfigurationPlatforms) = postSolution
{91935325-716F-4776-825B-7A2250320FC1}.Release|Win32.ActiveCfg = Release|Win32 + {91935325-716F-4776-825B-7A2250320FC1}.Release|Win32.Build.0 = Release|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/src/mod_python.suo b/src/mod_python.suo new file mode 100644 index 0000000..117f1f5 Binary files /dev/null and b/src/mod_python.suo differ diff --git a/src/mod_python.vcproj b/src/mod_python.vcproj new file mode 100644 index 0000000..d639450 --- /dev/null +++ b/src/mod_python.vcproj @@ -0,0 +1,140 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/mod_python.vcxproj b/src/mod_python.vcxproj new file mode 100644 index 0000000..f7c1ac9 --- /dev/null +++ b/src/mod_python.vcxproj @@ -0,0 +1,104 @@ + + + + + Release + Win32 + + + + {91935325-716F-4776-825B-7A2250320FC1} + + + + + + DynamicLibrary + Static + + + + + + + + + + <_ProjectFileVersion>10.0.40219.1 + .\Release\ + .\Release\ + false + AllRules.ruleset + + + + + + OnlyExplicitInline + include;$(PYTHONSRC)\include;$(APACHESRC)\include;C:\Python27\include;C:\tools\httpd-2.4.6\include;C:\tools\apr-1.4.8\include;C:\tools\apr-util-1.5.2\include;C:\tools\httpd-2.4.6\os\win32;C:\tools\httpd-2.4.6\modules\filters;%(AdditionalIncludeDirectories) + WIN32;NDEBUG;_WINDOWS;%(PreprocessorDefinitions) + true + MultiThreaded + true + + + .\Release/mod_python.pch + .\Release/ + .\Release/ + .\Release/ + Level3 + true + Default + + + /MACHINE:I386 %(AdditionalOptions) + libhttpd.lib;libapr.lib;libaprutil.lib;ws2_32.lib;%(AdditionalDependencies) + .\Release/mod_python.so + true + $(APACHESRC)\lib;$(PYTHONSRC)\libs;C:\Users\nathan\Downloads\httpd-2.4.6-win32-ssl_0.9.8\Apache24\lib;C:\Python27\libs;%(AdditionalLibraryDirectories) + .\Release/mod_python.pdb + .\Release/mod_python.lib + + + NDEBUG;%(PreprocessorDefinitions) + true + true + 
Win32 + .\Release/mod_python.tlb + + + NDEBUG;%(PreprocessorDefinitions) + 0x0409 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/mod_python.vcxproj.filters b/src/mod_python.vcxproj.filters new file mode 100644 index 0000000..b7ab2b1 --- /dev/null +++ b/src/mod_python.vcxproj.filters @@ -0,0 +1,53 @@ + + + + + {a90f906b-139f-461b-9179-9f6767afec72} + + + + + include + + + include + + + include + + + include + + + include + + + include + + + include + + + include + + + include + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/mod_python.vcxproj.user b/src/mod_python.vcxproj.user new file mode 100644 index 0000000..695b5c7 --- /dev/null +++ b/src/mod_python.vcxproj.user @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/src/psp_parser.c b/src/psp_parser.c new file mode 100644 index 0000000..2f1768c --- /dev/null +++ b/src/psp_parser.c @@ -0,0 +1,2275 @@ +#line 2 "psp_parser.c" + +#line 4 "psp_parser.c" + +#define YY_INT_ALIGNED short int + +/* A lexical scanner generated by flex */ + +#define FLEX_SCANNER +#define YY_FLEX_MAJOR_VERSION 2 +#define YY_FLEX_MINOR_VERSION 5 +#define YY_FLEX_SUBMINOR_VERSION 35 +#if YY_FLEX_SUBMINOR_VERSION > 0 +#define FLEX_BETA +#endif + +/* First, we deal with platform-specific or compiler-specific issues. */ + +/* begin standard C headers. */ +#include +#include +#include +#include + +/* end standard C headers. */ + +/* flex integer type definitions */ + +#ifndef FLEXINT_H +#define FLEXINT_H + +/* C99 systems have . Non-C99 systems may or may not. */ + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + +/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h, + * if you want the limit (max/min) macros for int types. 
+ */ +#ifndef __STDC_LIMIT_MACROS +#define __STDC_LIMIT_MACROS 1 +#endif + +#include +typedef int8_t flex_int8_t; +typedef uint8_t flex_uint8_t; +typedef int16_t flex_int16_t; +typedef uint16_t flex_uint16_t; +typedef int32_t flex_int32_t; +typedef uint32_t flex_uint32_t; +#else +typedef signed char flex_int8_t; +typedef short int flex_int16_t; +typedef int flex_int32_t; +typedef unsigned char flex_uint8_t; +typedef unsigned short int flex_uint16_t; +typedef unsigned int flex_uint32_t; +#endif /* ! C99 */ + +/* Limits of integral types. */ +#ifndef INT8_MIN +#define INT8_MIN (-128) +#endif +#ifndef INT16_MIN +#define INT16_MIN (-32767-1) +#endif +#ifndef INT32_MIN +#define INT32_MIN (-2147483647-1) +#endif +#ifndef INT8_MAX +#define INT8_MAX (127) +#endif +#ifndef INT16_MAX +#define INT16_MAX (32767) +#endif +#ifndef INT32_MAX +#define INT32_MAX (2147483647) +#endif +#ifndef UINT8_MAX +#define UINT8_MAX (255U) +#endif +#ifndef UINT16_MAX +#define UINT16_MAX (65535U) +#endif +#ifndef UINT32_MAX +#define UINT32_MAX (4294967295U) +#endif + +#endif /* ! FLEXINT_H */ + +#ifdef __cplusplus + +/* The "const" storage-class-modifier is valid. */ +#define YY_USE_CONST + +#else /* ! __cplusplus */ + +/* C99 requires __STDC__ to be defined as 1. */ +#if defined (__STDC__) + +#define YY_USE_CONST + +#endif /* defined (__STDC__) */ +#endif /* ! __cplusplus */ + +#ifdef YY_USE_CONST +#define yyconst const +#else +#define yyconst +#endif + +/* Returned upon end-of-file. */ +#define YY_NULL 0 + +/* Promotes a possibly negative, possibly signed char to an unsigned + * integer for use as an array index. If the signed char is negative, + * we want to instead treat it as an 8-bit unsigned char, hence the + * double cast. + */ +#define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c) + +/* An opaque pointer. 
*/ +#ifndef YY_TYPEDEF_YY_SCANNER_T +#define YY_TYPEDEF_YY_SCANNER_T +typedef void* yyscan_t; +#endif + +/* For convenience, these vars (plus the bison vars far below) + are macros in the reentrant scanner. */ +#define yyin yyg->yyin_r +#define yyout yyg->yyout_r +#define yyextra yyg->yyextra_r +#define yyleng yyg->yyleng_r +#define yytext yyg->yytext_r +#define yylineno (YY_CURRENT_BUFFER_LVALUE->yy_bs_lineno) +#define yycolumn (YY_CURRENT_BUFFER_LVALUE->yy_bs_column) +#define yy_flex_debug yyg->yy_flex_debug_r + +/* Enter a start condition. This macro really ought to take a parameter, + * but we do it the disgusting crufty way forced on us by the ()-less + * definition of BEGIN. + */ +#define BEGIN yyg->yy_start = 1 + 2 * + +/* Translate the current start state into a value that can be later handed + * to BEGIN to return to the state. The YYSTATE alias is for lex + * compatibility. + */ +#define YY_START ((yyg->yy_start - 1) / 2) +#define YYSTATE YY_START + +/* Action number for EOF rule of a given start state. */ +#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1) + +/* Special action meaning "start processing a new file". */ +#define YY_NEW_FILE yyrestart(yyin ,yyscanner ) + +#define YY_END_OF_BUFFER_CHAR 0 + +/* Size of default input buffer. */ +#ifndef YY_BUF_SIZE +#define YY_BUF_SIZE 16384 +#endif + +/* The state buf must be large enough to hold one state per character in the main buffer. + */ +#define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type)) + +#ifndef YY_TYPEDEF_YY_BUFFER_STATE +#define YY_TYPEDEF_YY_BUFFER_STATE +typedef struct yy_buffer_state *YY_BUFFER_STATE; +#endif + +#define EOB_ACT_CONTINUE_SCAN 0 +#define EOB_ACT_END_OF_FILE 1 +#define EOB_ACT_LAST_MATCH 2 + + #define YY_LESS_LINENO(n) + +/* Return all but the first "n" matched characters back to the input stream. */ +#define yyless(n) \ + do \ + { \ + /* Undo effects of setting up yytext. 
*/ \ + int yyless_macro_arg = (n); \ + YY_LESS_LINENO(yyless_macro_arg);\ + *yy_cp = yyg->yy_hold_char; \ + YY_RESTORE_YY_MORE_OFFSET \ + yyg->yy_c_buf_p = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \ + YY_DO_BEFORE_ACTION; /* set up yytext again */ \ + } \ + while ( 0 ) + +#define unput(c) yyunput( c, yyg->yytext_ptr , yyscanner ) + +#ifndef YY_TYPEDEF_YY_SIZE_T +#define YY_TYPEDEF_YY_SIZE_T +typedef size_t yy_size_t; +#endif + +#ifndef YY_STRUCT_YY_BUFFER_STATE +#define YY_STRUCT_YY_BUFFER_STATE +struct yy_buffer_state + { + FILE *yy_input_file; + + char *yy_ch_buf; /* input buffer */ + char *yy_buf_pos; /* current position in input buffer */ + + /* Size of input buffer in bytes, not including room for EOB + * characters. + */ + yy_size_t yy_buf_size; + + /* Number of characters read into yy_ch_buf, not including EOB + * characters. + */ + int yy_n_chars; + + /* Whether we "own" the buffer - i.e., we know we created it, + * and can realloc() it to grow it, and should free() it to + * delete it. + */ + int yy_is_our_buffer; + + /* Whether this is an "interactive" input source; if so, and + * if we're using stdio for input, then we want to use getc() + * instead of fread(), to make sure we stop fetching input after + * each newline. + */ + int yy_is_interactive; + + /* Whether we're considered to be at the beginning of a line. + * If so, '^' rules will be active on the next match, otherwise + * not. + */ + int yy_at_bol; + + int yy_bs_lineno; /**< The line count. */ + int yy_bs_column; /**< The column count. */ + + /* Whether to try to fill the input buffer when we reach the + * end of it. + */ + int yy_fill_buffer; + + int yy_buffer_status; + +#define YY_BUFFER_NEW 0 +#define YY_BUFFER_NORMAL 1 + /* When an EOF's been seen but there's still some text to process + * then we mark the buffer as YY_EOF_PENDING, to indicate that we + * shouldn't try reading from the input source any more. 
We might + * still have a bunch of tokens to match, though, because of + * possible backing-up. + * + * When we actually see the EOF, we change the status to "new" + * (via yyrestart()), so that the user can continue scanning by + * just pointing yyin at a new input file. + */ +#define YY_BUFFER_EOF_PENDING 2 + + }; +#endif /* !YY_STRUCT_YY_BUFFER_STATE */ + +/* We provide macros for accessing buffer states in case in the + * future we want to put the buffer states in a more general + * "scanner state". + * + * Returns the top of the stack, or NULL. + */ +#define YY_CURRENT_BUFFER ( yyg->yy_buffer_stack \ + ? yyg->yy_buffer_stack[yyg->yy_buffer_stack_top] \ + : NULL) + +/* Same as previous macro, but useful when we know that the buffer stack is not + * NULL or when we need an lvalue. For internal use only. + */ +#define YY_CURRENT_BUFFER_LVALUE yyg->yy_buffer_stack[yyg->yy_buffer_stack_top] + +void yyrestart (FILE *input_file ,yyscan_t yyscanner ); +void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer ,yyscan_t yyscanner ); +YY_BUFFER_STATE yy_create_buffer (FILE *file,int size ,yyscan_t yyscanner ); +void yy_delete_buffer (YY_BUFFER_STATE b ,yyscan_t yyscanner ); +void yy_flush_buffer (YY_BUFFER_STATE b ,yyscan_t yyscanner ); +void yypush_buffer_state (YY_BUFFER_STATE new_buffer ,yyscan_t yyscanner ); +void yypop_buffer_state (yyscan_t yyscanner ); + +static void yyensure_buffer_stack (yyscan_t yyscanner ); +static void yy_load_buffer_state (yyscan_t yyscanner ); +static void yy_init_buffer (YY_BUFFER_STATE b,FILE *file ,yyscan_t yyscanner ); + +#define YY_FLUSH_BUFFER yy_flush_buffer(YY_CURRENT_BUFFER ,yyscanner) + +YY_BUFFER_STATE yy_scan_buffer (char *base,yy_size_t size ,yyscan_t yyscanner ); +YY_BUFFER_STATE yy_scan_string (yyconst char *yy_str ,yyscan_t yyscanner ); +YY_BUFFER_STATE yy_scan_bytes (yyconst char *bytes,int len ,yyscan_t yyscanner ); + +void *yyalloc (yy_size_t ,yyscan_t yyscanner ); +void *yyrealloc (void *,yy_size_t ,yyscan_t yyscanner ); 
+void yyfree (void * ,yyscan_t yyscanner ); + +#define yy_new_buffer yy_create_buffer + +#define yy_set_interactive(is_interactive) \ + { \ + if ( ! YY_CURRENT_BUFFER ){ \ + yyensure_buffer_stack (yyscanner); \ + YY_CURRENT_BUFFER_LVALUE = \ + yy_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); \ + } \ + YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \ + } + +#define yy_set_bol(at_bol) \ + { \ + if ( ! YY_CURRENT_BUFFER ){\ + yyensure_buffer_stack (yyscanner); \ + YY_CURRENT_BUFFER_LVALUE = \ + yy_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); \ + } \ + YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \ + } + +#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol) + +/* Begin user sect3 */ + +#define yywrap(n) 1 +#define YY_SKIP_YYWRAP + +typedef unsigned char YY_CHAR; + +typedef int yy_state_type; + +#define yytext_ptr yytext_r + +static yy_state_type yy_get_previous_state (yyscan_t yyscanner ); +static yy_state_type yy_try_NUL_trans (yy_state_type current_state ,yyscan_t yyscanner); +static int yy_get_next_buffer (yyscan_t yyscanner ); +static void yy_fatal_error (yyconst char msg[] ,yyscan_t yyscanner ); + +/* Done after the current pattern has been matched and before the + * corresponding action - sets up yytext. + */ +#define YY_DO_BEFORE_ACTION \ + yyg->yytext_ptr = yy_bp; \ + yyleng = (size_t) (yy_cp - yy_bp); \ + yyg->yy_hold_char = *yy_cp; \ + *yy_cp = '\0'; \ + yyg->yy_c_buf_p = yy_cp; + +#define YY_NUM_RULES 23 +#define YY_END_OF_BUFFER 24 +/* This struct is not used in this scanner, + but its presence is necessary. 
*/ +struct yy_trans_info + { + flex_int32_t yy_verify; + flex_int32_t yy_nxt; + }; +static yyconst flex_int16_t yy_accept[73] = + { 0, + 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, + 0, 0, 24, 2, 1, 2, 11, 10, 11, 11, + 11, 15, 12, 12, 15, 14, 19, 18, 19, 19, + 16, 23, 23, 23, 23, 1, 10, 7, 3, 4, + 5, 12, 13, 18, 17, 16, 21, 0, 0, 0, + 6, 8, 0, 0, 9, 0, 22, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 20, 0 + } ; + +static yyconst flex_int32_t yy_ec[256] = + { 0, + 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, + 1, 1, 4, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 5, 1, 6, 1, 1, 7, 1, 1, 1, + 1, 1, 1, 1, 8, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 9, 1, 10, + 11, 12, 1, 13, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 14, 1, 1, 1, 1, 1, 1, 15, 16, + + 17, 18, 1, 1, 19, 1, 1, 20, 1, 21, + 1, 1, 1, 22, 1, 23, 24, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1 + } ; + +static yyconst flex_int32_t yy_meta[25] = + { 0, + 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1 + } ; + +static yyconst flex_int16_t yy_base[79] = + { 0, + 0, 2, 4, 16, 28, 35, 6, 43, 4, 5, + 81, 80, 87, 90, 90, 83, 90, 90, 82, 77, + 6, 90, 90, 80, 70, 90, 90, 90, 78, 68, + 20, 90, 67, 57, 69, 90, 90, 41, 90, 90, + 90, 90, 90, 90, 90, 31, 90, 61, 68, 66, + 90, 90, 53, 60, 90, 47, 90, 54, 52, 63, + 16, 37, 35, 36, 46, 32, 10, 35, 0, 34, + 11, 90, 57, 59, 61, 63, 65, 0 + } ; + +static yyconst flex_int16_t yy_def[79] = + { 0, + 73, 73, 74, 74, 75, 75, 76, 76, 77, 77, + 77, 77, 72, 72, 72, 72, 72, 72, 72, 72, + 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, + 72, 72, 72, 72, 
72, 72, 72, 72, 72, 72, + 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, + 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, + 72, 72, 72, 72, 72, 72, 72, 72, 78, 78, + 78, 0, 72, 72, 72, 72, 72, 72 + } ; + +static yyconst flex_int16_t yy_nxt[115] = + { 0, + 70, 72, 15, 16, 15, 16, 18, 19, 28, 29, + 33, 33, 30, 20, 68, 69, 71, 21, 18, 19, + 61, 46, 34, 34, 46, 20, 39, 40, 41, 21, + 23, 24, 46, 62, 25, 46, 26, 23, 24, 71, + 69, 25, 67, 26, 31, 28, 29, 31, 50, 30, + 66, 51, 65, 52, 64, 63, 67, 14, 14, 17, + 17, 22, 22, 27, 27, 32, 32, 61, 60, 59, + 58, 57, 56, 55, 54, 53, 49, 48, 47, 45, + 44, 43, 42, 38, 37, 36, 72, 35, 35, 13, + 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, + + 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, + 72, 72, 72, 72 + } ; + +static yyconst flex_int16_t yy_chk[115] = + { 0, + 78, 0, 1, 1, 2, 2, 3, 3, 7, 7, + 9, 10, 7, 3, 67, 67, 71, 3, 4, 4, + 61, 31, 9, 10, 31, 4, 21, 21, 21, 4, + 5, 5, 46, 61, 5, 46, 5, 6, 6, 70, + 68, 6, 66, 6, 8, 8, 8, 8, 38, 8, + 65, 38, 64, 38, 63, 62, 65, 73, 73, 74, + 74, 75, 75, 76, 76, 77, 77, 60, 59, 58, + 56, 54, 53, 50, 49, 48, 35, 34, 33, 30, + 29, 25, 24, 20, 19, 16, 13, 12, 11, 72, + 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, + + 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, + 72, 72, 72, 72 + } ; + +/* The intent behind this definition is that it'll catch + * any uses of REJECT which flex missed. + */ +#define REJECT reject_used_but_not_detected +#define yymore() yymore_used_but_not_detected +#define YY_MORE_ADJ 0 +#define YY_RESTORE_YY_MORE_OFFSET +#line 1 "psp_parser.l" +#line 2 "psp_parser.l" +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. 
You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * + * This file originally written by Sterling Hughes. + * + */ + +/* NOTE The seemingly unusual generated Python code (sometime using + * ";" to separate statements, newline placement, etc) is such that + * for vast majority of cases the line number of the input file will + * match the line number of the output! + */ + +#include "psp_parser.h" + +#define OUTPUT_WHITESPACE(__wsstring) \ + psp_string_0((__wsstring)); \ + psp_string_append(&PSP_PG(pycode), (__wsstring)->blob) + +#define CLEAR_WHITESPACE(__wsstring) psp_string_clear((__wsstring)); + +#define YY_NO_UNISTD_H 1 + + + + + +#line 520 "psp_parser.c" + +#define INITIAL 0 +#define TEXT 1 +#define PYCODE 2 +#define INDENT 3 +#define DIR 4 +#define COMMENT 5 + +#ifndef YY_NO_UNISTD_H +/* Special case for "unistd.h", since it is non-ANSI. We include it way + * down here because we want the user's section 1 to have been scanned first. + * The user has a chance to override it with an option. + */ +#include +#endif + +#ifndef YY_EXTRA_TYPE +#define YY_EXTRA_TYPE void * +#endif + +/* Holds the entire state of the reentrant scanner. */ +struct yyguts_t + { + + /* User-defined. Not touched by flex. */ + YY_EXTRA_TYPE yyextra_r; + + /* The rest are the same as the globals declared in the non-reentrant scanner. */ + FILE *yyin_r, *yyout_r; + size_t yy_buffer_stack_top; /**< index of top of stack. */ + size_t yy_buffer_stack_max; /**< capacity of stack. */ + YY_BUFFER_STATE * yy_buffer_stack; /**< Stack as an array. 
*/ + char yy_hold_char; + int yy_n_chars; + int yyleng_r; + char *yy_c_buf_p; + int yy_init; + int yy_start; + int yy_did_buffer_switch_on_eof; + int yy_start_stack_ptr; + int yy_start_stack_depth; + int *yy_start_stack; + yy_state_type yy_last_accepting_state; + char* yy_last_accepting_cpos; + + int yylineno_r; + int yy_flex_debug_r; + + char *yytext_r; + int yy_more_flag; + int yy_more_len; + + }; /* end struct yyguts_t */ + +static int yy_init_globals (yyscan_t yyscanner ); + +int yylex_init (yyscan_t* scanner); + +int yylex_init_extra (YY_EXTRA_TYPE user_defined,yyscan_t* scanner); + +/* Accessor methods to globals. + These are made visible to non-reentrant scanners for convenience. */ + +int yylex_destroy (yyscan_t yyscanner ); + +int yyget_debug (yyscan_t yyscanner ); + +void yyset_debug (int debug_flag ,yyscan_t yyscanner ); + +YY_EXTRA_TYPE yyget_extra (yyscan_t yyscanner ); + +void yyset_extra (YY_EXTRA_TYPE user_defined ,yyscan_t yyscanner ); + +FILE *yyget_in (yyscan_t yyscanner ); + +void yyset_in (FILE * in_str ,yyscan_t yyscanner ); + +FILE *yyget_out (yyscan_t yyscanner ); + +void yyset_out (FILE * out_str ,yyscan_t yyscanner ); + +int yyget_leng (yyscan_t yyscanner ); + +char *yyget_text (yyscan_t yyscanner ); + +int yyget_lineno (yyscan_t yyscanner ); + +void yyset_lineno (int line_number ,yyscan_t yyscanner ); + +/* Macros after this point can all be overridden by user definitions in + * section 1. 
+ */ + +#ifndef YY_SKIP_YYWRAP +#ifdef __cplusplus +extern "C" int yywrap (yyscan_t yyscanner ); +#else +extern int yywrap (yyscan_t yyscanner ); +#endif +#endif + + static void yyunput (int c,char *buf_ptr ,yyscan_t yyscanner); + +#ifndef yytext_ptr +static void yy_flex_strncpy (char *,yyconst char *,int ,yyscan_t yyscanner); +#endif + +#ifdef YY_NEED_STRLEN +static int yy_flex_strlen (yyconst char * ,yyscan_t yyscanner); +#endif + +#ifndef YY_NO_INPUT + +#ifdef __cplusplus +static int yyinput (yyscan_t yyscanner ); +#else +static int input (yyscan_t yyscanner ); +#endif + +#endif + +/* Amount of stuff to slurp up with each read. */ +#ifndef YY_READ_BUF_SIZE +#define YY_READ_BUF_SIZE 8192 +#endif + +/* Copy whatever the last rule matched to the standard output. */ +#ifndef ECHO +/* This used to be an fputs(), but since the string might contain NUL's, + * we now use fwrite(). + */ +#define ECHO do { if (fwrite( yytext, yyleng, 1, yyout )) {} } while (0) +#endif + +/* Gets input and stuffs it into "buf". number of characters read, or YY_NULL, + * is returned in "result". + */ +#ifndef YY_INPUT +#define YY_INPUT(buf,result,max_size) \ + if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \ + { \ + int c = '*'; \ + unsigned n; \ + for ( n = 0; n < max_size && \ + (c = getc( yyin )) != EOF && c != '\n'; ++n ) \ + buf[n] = (char) c; \ + if ( c == '\n' ) \ + buf[n++] = (char) c; \ + if ( c == EOF && ferror( yyin ) ) \ + YY_FATAL_ERROR( "input in flex scanner failed" ); \ + result = n; \ + } \ + else \ + { \ + errno=0; \ + while ( (result = fread(buf, 1, max_size, yyin))==0 && ferror(yyin)) \ + { \ + if( errno != EINTR) \ + { \ + YY_FATAL_ERROR( "input in flex scanner failed" ); \ + break; \ + } \ + errno=0; \ + clearerr(yyin); \ + } \ + }\ +\ + +#endif + +/* No semi-colon after return; correct usage is to write "yyterminate();" - + * we don't want an extra ';' after the "return" because that will cause + * some compilers to complain about unreachable statements. 
+ */ +#ifndef yyterminate +#define yyterminate() return YY_NULL +#endif + +/* Number of entries by which start-condition stack grows. */ +#ifndef YY_START_STACK_INCR +#define YY_START_STACK_INCR 25 +#endif + +/* Report a fatal error. */ +#ifndef YY_FATAL_ERROR +#define YY_FATAL_ERROR(msg) yy_fatal_error( msg , yyscanner) +#endif + +/* end tables serialization structures and prototypes */ + +/* Default declaration of generated scanner - a define so the user can + * easily add parameters. + */ +#ifndef YY_DECL +#define YY_DECL_IS_OURS 1 + +extern int yylex (yyscan_t yyscanner); + +#define YY_DECL int yylex (yyscan_t yyscanner) +#endif /* !YY_DECL */ + +/* Code executed at the beginning of each rule, after yytext and yyleng + * have been set up. + */ +#ifndef YY_USER_ACTION +#define YY_USER_ACTION +#endif + +/* Code executed at the end of each rule. */ +#ifndef YY_BREAK +#define YY_BREAK break; +#endif + +#define YY_RULE_SETUP \ + if ( yyleng > 0 ) \ + YY_CURRENT_BUFFER_LVALUE->yy_at_bol = \ + (yytext[yyleng - 1] == '\n'); \ + YY_USER_ACTION + +/** The main scanner function which does all the work. + */ +YY_DECL +{ + register yy_state_type yy_current_state; + register char *yy_cp, *yy_bp; + register int yy_act; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + +#line 46 "psp_parser.l" + + +#line 752 "psp_parser.c" + + if ( !yyg->yy_init ) + { + yyg->yy_init = 1; + +#ifdef YY_USER_INIT + YY_USER_INIT; +#endif + + if ( ! yyg->yy_start ) + yyg->yy_start = 1; /* first start state */ + + if ( ! yyin ) + yyin = stdin; + + if ( ! yyout ) + yyout = stdout; + + if ( ! YY_CURRENT_BUFFER ) { + yyensure_buffer_stack (yyscanner); + YY_CURRENT_BUFFER_LVALUE = + yy_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); + } + + yy_load_buffer_state(yyscanner ); + } + + while ( 1 ) /* loops until end-of-file is reached */ + { + yy_cp = yyg->yy_c_buf_p; + + /* Support of yytext. 
*/ + *yy_cp = yyg->yy_hold_char; + + /* yy_bp points to the position in yy_ch_buf of the start of + * the current run. + */ + yy_bp = yy_cp; + + yy_current_state = yyg->yy_start; + yy_current_state += YY_AT_BOL(); +yy_match: + do + { + register YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)]; + if ( yy_accept[yy_current_state] ) + { + yyg->yy_last_accepting_state = yy_current_state; + yyg->yy_last_accepting_cpos = yy_cp; + } + while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) + { + yy_current_state = (int) yy_def[yy_current_state]; + if ( yy_current_state >= 73 ) + yy_c = yy_meta[(unsigned int) yy_c]; + } + yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; + ++yy_cp; + } + while ( yy_base[yy_current_state] != 90 ); + +yy_find_action: + yy_act = yy_accept[yy_current_state]; + if ( yy_act == 0 ) + { /* have to back up */ + yy_cp = yyg->yy_last_accepting_cpos; + yy_current_state = yyg->yy_last_accepting_state; + yy_act = yy_accept[yy_current_state]; + } + + YY_DO_BEFORE_ACTION; + +do_action: /* This label is used only to access EOF actions. 
*/ + + switch ( yy_act ) + { /* beginning of action switch */ + case 0: /* must back up */ + /* undo the effects of YY_DO_BEFORE_ACTION */ + *yy_cp = yyg->yy_hold_char; + yy_cp = yyg->yy_last_accepting_cpos; + yy_current_state = yyg->yy_last_accepting_state; + goto yy_find_action; + +case 1: +/* rule 1 can match eol */ +YY_RULE_SETUP +#line 48 "psp_parser.l" +{ + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("req.write(\"\"\"")); + + yyless(0); + BEGIN TEXT; +} + YY_BREAK +case 2: +YY_RULE_SETUP +#line 55 "psp_parser.l" +{ + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("req.write(\"\"\"")); + + yyless(0); + BEGIN TEXT; +} + YY_BREAK +case 3: +YY_RULE_SETUP +#line 62 "psp_parser.l" +{ + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("\\\\n")); +} + YY_BREAK +case 4: +YY_RULE_SETUP +#line 66 "psp_parser.l" +{ + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("\\\\r")); +} + YY_BREAK +case 5: +YY_RULE_SETUP +#line 70 "psp_parser.l" +{ + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("\\\\t")); +} + YY_BREAK +case 6: +YY_RULE_SETUP +#line 74 "psp_parser.l" +{ /* expression */ + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("\"\"\",0); req.write(str(")); + PSP_PG(is_psp_echo) = 1; + + BEGIN PYCODE; +} + YY_BREAK +case 7: +YY_RULE_SETUP +#line 81 "psp_parser.l" +{ /* python code */ + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("\"\"\",0);")); + CLEAR_WHITESPACE(&PSP_PG(whitespace)); + PSP_PG(seen_newline) = 0; + BEGIN PYCODE; +} + YY_BREAK +case 8: +YY_RULE_SETUP +#line 88 "psp_parser.l" +{ /* directive */ + BEGIN DIR; +} + YY_BREAK +case 9: +YY_RULE_SETUP +#line 92 "psp_parser.l" +{ /* comment */ + BEGIN COMMENT; +} + YY_BREAK +case 10: +/* rule 10 can match eol */ +YY_RULE_SETUP +#line 96 "psp_parser.l" +{ + psp_string_appendc(&PSP_PG(pycode), '\n'); +} + YY_BREAK +case 11: +YY_RULE_SETUP +#line 100 "psp_parser.l" +{ + if (yytext[0] == '"') { + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("\\\"")); + } else { + psp_string_appendc(&PSP_PG(pycode), yytext[0]); + 
} +} + YY_BREAK +case YY_STATE_EOF(TEXT): +#line 108 "psp_parser.l" +{ + yypop_buffer_state(yyscanner); + if (!YY_CURRENT_BUFFER) { + /* this is really the end */ + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("\"\"\",0)\n")); + yyterminate(); + } + else { + /* we are inside include, continue scanning */ + BEGIN DIR; + } +} + YY_BREAK +case 12: +/* rule 12 can match eol */ +YY_RULE_SETUP +#line 121 "psp_parser.l" +{ + psp_string_appendc(&PSP_PG(pycode), '\n'); + + PSP_PG(seen_newline) = 1; + BEGIN INDENT; +} + YY_BREAK +case 13: +YY_RULE_SETUP +#line 128 "psp_parser.l" +{ + + if (PSP_PG(is_psp_echo)) { + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("),0); req.write(\"\"\"")); + PSP_PG(is_psp_echo) = 0; + } + else { + if (!PSP_PG(seen_newline)) { + /* this will happen is you have <%%> */ + psp_string_appendc(&PSP_PG(pycode), ';'); + } + + if (PSP_PG(after_colon)) { + /* this is dumb mistake-proof measure, if %> + is immediately following where there should be an indent */ + psp_string_appendc(&PSP_PG(whitespace), '\t'); + PSP_PG(after_colon) = 0; + } + OUTPUT_WHITESPACE(&PSP_PG(whitespace)); + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("req.write(\"\"\"")); + } + + BEGIN TEXT; +} + YY_BREAK +case 14: +YY_RULE_SETUP +#line 153 "psp_parser.l" +{ + psp_string_appendc(&PSP_PG(pycode), yytext[0]); + PSP_PG(after_colon) = 1; +} + YY_BREAK +case 15: +YY_RULE_SETUP +#line 158 "psp_parser.l" +{ + psp_string_appendc(&PSP_PG(pycode), yytext[0]); + PSP_PG(after_colon) = 0; +} + YY_BREAK +case 16: +YY_RULE_SETUP +#line 163 "psp_parser.l" +{ + + CLEAR_WHITESPACE(&PSP_PG(whitespace)); + psp_string_appendl(&PSP_PG(whitespace), yytext, yyleng); + psp_string_appendl(&PSP_PG(pycode), yytext, yyleng); + + BEGIN PYCODE; +} + YY_BREAK +case 17: +YY_RULE_SETUP +#line 172 "psp_parser.l" +{ + yyless(0); + BEGIN PYCODE; +} + YY_BREAK +case 18: +/* rule 18 can match eol */ +YY_RULE_SETUP +#line 177 "psp_parser.l" +{ + CLEAR_WHITESPACE(&PSP_PG(whitespace)); + yyless(0); + BEGIN PYCODE; 
+} + YY_BREAK +case 19: +YY_RULE_SETUP +#line 183 "psp_parser.l" +{ + CLEAR_WHITESPACE(&PSP_PG(whitespace)); + yyless(0); + BEGIN PYCODE; +} + YY_BREAK +case 20: +/* rule 20 can match eol */ +YY_RULE_SETUP +#line 189 "psp_parser.l" +{ + + char *filename; + char *path; + FILE *f; + + /* find a quote */ + filename = strchr(yytext, '"') + 1; + filename[strchr(filename, '"')-filename] = '\0'; + + /* XXX The absolute path check won't work on Windows, + * needs to be corrected + */ + + if (PSP_PG(dir) && filename[0] != '/') { + path = malloc(strlen(filename)+strlen(PSP_PG(dir))+1); + if (path == NULL) { + PyErr_NoMemory(); + yyterminate(); + } + strcpy(path, PSP_PG(dir)); + strcat(path, filename); + } + else { + path = filename; + } + + Py_BEGIN_ALLOW_THREADS + f = fopen(path, "rb"); + Py_END_ALLOW_THREADS + if (f == NULL) { + PyErr_SetFromErrnoWithFilename(PyExc_IOError, path); + } + else { + yypush_buffer_state(yy_create_buffer(f,YY_BUF_SIZE,yyscanner),yyscanner); + BEGIN(TEXT); + } + + if (PSP_PG(dir)) free(path); +} + YY_BREAK +case 21: +YY_RULE_SETUP +#line 231 "psp_parser.l" +{ + BEGIN TEXT; +} + YY_BREAK +case 22: +YY_RULE_SETUP +#line 235 "psp_parser.l" +{ + BEGIN TEXT; +} + YY_BREAK +case 23: +YY_RULE_SETUP +#line 239 "psp_parser.l" +ECHO; + YY_BREAK +#line 1104 "psp_parser.c" +case YY_STATE_EOF(INITIAL): +case YY_STATE_EOF(PYCODE): +case YY_STATE_EOF(INDENT): +case YY_STATE_EOF(DIR): +case YY_STATE_EOF(COMMENT): + yyterminate(); + + case YY_END_OF_BUFFER: + { + /* Amount of text matched not including the EOB char. */ + int yy_amount_of_matched_text = (int) (yy_cp - yyg->yytext_ptr) - 1; + + /* Undo the effects of YY_DO_BEFORE_ACTION. */ + *yy_cp = yyg->yy_hold_char; + YY_RESTORE_YY_MORE_OFFSET + + if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW ) + { + /* We're scanning a new file or input source. It's + * possible that this happened because the user + * just pointed yyin at a new source and called + * yylex(). 
If so, then we have to assure + * consistency between YY_CURRENT_BUFFER and our + * globals. Here is the right place to do so, because + * this is the first action (other than possibly a + * back-up) that will match for the new input source. + */ + yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars; + YY_CURRENT_BUFFER_LVALUE->yy_input_file = yyin; + YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL; + } + + /* Note that here we test for yy_c_buf_p "<=" to the position + * of the first EOB in the buffer, since yy_c_buf_p will + * already have been incremented past the NUL character + * (since all states make transitions on EOB to the + * end-of-buffer state). Contrast this with the test + * in input(). + */ + if ( yyg->yy_c_buf_p <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] ) + { /* This was really a NUL. */ + yy_state_type yy_next_state; + + yyg->yy_c_buf_p = yyg->yytext_ptr + yy_amount_of_matched_text; + + yy_current_state = yy_get_previous_state( yyscanner ); + + /* Okay, we're now positioned to make the NUL + * transition. We couldn't have + * yy_get_previous_state() go ahead and do it + * for us because it doesn't know how to deal + * with the possibility of jamming (and we don't + * want to build jamming into it because then it + * will run more slowly). + */ + + yy_next_state = yy_try_NUL_trans( yy_current_state , yyscanner); + + yy_bp = yyg->yytext_ptr + YY_MORE_ADJ; + + if ( yy_next_state ) + { + /* Consume the NUL. 
*/ + yy_cp = ++yyg->yy_c_buf_p; + yy_current_state = yy_next_state; + goto yy_match; + } + + else + { + yy_cp = yyg->yy_c_buf_p; + goto yy_find_action; + } + } + + else switch ( yy_get_next_buffer( yyscanner ) ) + { + case EOB_ACT_END_OF_FILE: + { + yyg->yy_did_buffer_switch_on_eof = 0; + + if ( yywrap(yyscanner ) ) + { + /* Note: because we've taken care in + * yy_get_next_buffer() to have set up + * yytext, we can now set up + * yy_c_buf_p so that if some total + * hoser (like flex itself) wants to + * call the scanner after we return the + * YY_NULL, it'll still work - another + * YY_NULL will get returned. + */ + yyg->yy_c_buf_p = yyg->yytext_ptr + YY_MORE_ADJ; + + yy_act = YY_STATE_EOF(YY_START); + goto do_action; + } + + else + { + if ( ! yyg->yy_did_buffer_switch_on_eof ) + YY_NEW_FILE; + } + break; + } + + case EOB_ACT_CONTINUE_SCAN: + yyg->yy_c_buf_p = + yyg->yytext_ptr + yy_amount_of_matched_text; + + yy_current_state = yy_get_previous_state( yyscanner ); + + yy_cp = yyg->yy_c_buf_p; + yy_bp = yyg->yytext_ptr + YY_MORE_ADJ; + goto yy_match; + + case EOB_ACT_LAST_MATCH: + yyg->yy_c_buf_p = + &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars]; + + yy_current_state = yy_get_previous_state( yyscanner ); + + yy_cp = yyg->yy_c_buf_p; + yy_bp = yyg->yytext_ptr + YY_MORE_ADJ; + goto yy_find_action; + } + break; + } + + default: + YY_FATAL_ERROR( + "fatal flex scanner internal error--no action found" ); + } /* end of action switch */ + } /* end of scanning one token */ +} /* end of yylex */ + +/* yy_get_next_buffer - try to read in a new buffer + * + * Returns a code representing an action: + * EOB_ACT_LAST_MATCH - + * EOB_ACT_CONTINUE_SCAN - continue scanning from current position + * EOB_ACT_END_OF_FILE - end of file + */ +static int yy_get_next_buffer (yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf; + register char *source = yyg->yytext_ptr; + register int 
number_to_move, i; + int ret_val; + + if ( yyg->yy_c_buf_p > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars + 1] ) + YY_FATAL_ERROR( + "fatal flex scanner internal error--end of buffer missed" ); + + if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 ) + { /* Don't try to fill the buffer, so this is an EOF. */ + if ( yyg->yy_c_buf_p - yyg->yytext_ptr - YY_MORE_ADJ == 1 ) + { + /* We matched a single character, the EOB, so + * treat this as a final EOF. + */ + return EOB_ACT_END_OF_FILE; + } + + else + { + /* We matched some text prior to the EOB, first + * process it. + */ + return EOB_ACT_LAST_MATCH; + } + } + + /* Try to read more data. */ + + /* First move last chars to start of buffer. */ + number_to_move = (int) (yyg->yy_c_buf_p - yyg->yytext_ptr) - 1; + + for ( i = 0; i < number_to_move; ++i ) + *(dest++) = *(source++); + + if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING ) + /* don't do the read, it's not guaranteed to return an EOF, + * just force an EOF + */ + YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars = 0; + + else + { + int num_to_read = + YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1; + + while ( num_to_read <= 0 ) + { /* Not enough room in the buffer - grow it. */ + + /* just a shorter name for the current buffer */ + YY_BUFFER_STATE b = YY_CURRENT_BUFFER; + + int yy_c_buf_p_offset = + (int) (yyg->yy_c_buf_p - b->yy_ch_buf); + + if ( b->yy_is_our_buffer ) + { + int new_size = b->yy_buf_size * 2; + + if ( new_size <= 0 ) + b->yy_buf_size += b->yy_buf_size / 8; + else + b->yy_buf_size *= 2; + + b->yy_ch_buf = (char *) + /* Include room in for 2 EOB chars. */ + yyrealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2 ,yyscanner ); + } + else + /* Can't grow it, we don't own it. */ + b->yy_ch_buf = 0; + + if ( ! 
b->yy_ch_buf ) + YY_FATAL_ERROR( + "fatal error - scanner input buffer overflow" ); + + yyg->yy_c_buf_p = &b->yy_ch_buf[yy_c_buf_p_offset]; + + num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size - + number_to_move - 1; + + } + + if ( num_to_read > YY_READ_BUF_SIZE ) + num_to_read = YY_READ_BUF_SIZE; + + /* Read in more data. */ + YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]), + yyg->yy_n_chars, (size_t) num_to_read ); + + YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars; + } + + if ( yyg->yy_n_chars == 0 ) + { + if ( number_to_move == YY_MORE_ADJ ) + { + ret_val = EOB_ACT_END_OF_FILE; + yyrestart(yyin ,yyscanner); + } + + else + { + ret_val = EOB_ACT_LAST_MATCH; + YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = + YY_BUFFER_EOF_PENDING; + } + } + + else + ret_val = EOB_ACT_CONTINUE_SCAN; + + if ((yy_size_t) (yyg->yy_n_chars + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) { + /* Extend the array by 50%, plus the number we really need. */ + yy_size_t new_size = yyg->yy_n_chars + number_to_move + (yyg->yy_n_chars >> 1); + YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) yyrealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size ,yyscanner ); + if ( ! 
YY_CURRENT_BUFFER_LVALUE->yy_ch_buf ) + YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" ); + } + + yyg->yy_n_chars += number_to_move; + YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] = YY_END_OF_BUFFER_CHAR; + YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars + 1] = YY_END_OF_BUFFER_CHAR; + + yyg->yytext_ptr = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0]; + + return ret_val; +} + +/* yy_get_previous_state - get the state just before the EOB char was reached */ + + static yy_state_type yy_get_previous_state (yyscan_t yyscanner) +{ + register yy_state_type yy_current_state; + register char *yy_cp; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + + yy_current_state = yyg->yy_start; + yy_current_state += YY_AT_BOL(); + + for ( yy_cp = yyg->yytext_ptr + YY_MORE_ADJ; yy_cp < yyg->yy_c_buf_p; ++yy_cp ) + { + register YY_CHAR yy_c = (*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1); + if ( yy_accept[yy_current_state] ) + { + yyg->yy_last_accepting_state = yy_current_state; + yyg->yy_last_accepting_cpos = yy_cp; + } + while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) + { + yy_current_state = (int) yy_def[yy_current_state]; + if ( yy_current_state >= 73 ) + yy_c = yy_meta[(unsigned int) yy_c]; + } + yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; + } + + return yy_current_state; +} + +/* yy_try_NUL_trans - try to make a transition on the NUL character + * + * synopsis + * next_state = yy_try_NUL_trans( current_state ); + */ + static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state , yyscan_t yyscanner) +{ + register int yy_is_jam; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; /* This var may be unused depending upon options. 
*/ + register char *yy_cp = yyg->yy_c_buf_p; + + register YY_CHAR yy_c = 1; + if ( yy_accept[yy_current_state] ) + { + yyg->yy_last_accepting_state = yy_current_state; + yyg->yy_last_accepting_cpos = yy_cp; + } + while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) + { + yy_current_state = (int) yy_def[yy_current_state]; + if ( yy_current_state >= 73 ) + yy_c = yy_meta[(unsigned int) yy_c]; + } + yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; + yy_is_jam = (yy_current_state == 72); + + return yy_is_jam ? 0 : yy_current_state; +} + + static void yyunput (int c, register char * yy_bp , yyscan_t yyscanner) +{ + register char *yy_cp; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + + yy_cp = yyg->yy_c_buf_p; + + /* undo effects of setting up yytext */ + *yy_cp = yyg->yy_hold_char; + + if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 ) + { /* need to shift things up to make room */ + /* +2 for EOB chars. */ + register int number_to_move = yyg->yy_n_chars + 2; + register char *dest = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[ + YY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2]; + register char *source = + &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]; + + while ( source > YY_CURRENT_BUFFER_LVALUE->yy_ch_buf ) + *--dest = *--source; + + yy_cp += (int) (dest - source); + yy_bp += (int) (dest - source); + YY_CURRENT_BUFFER_LVALUE->yy_n_chars = + yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_buf_size; + + if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 ) + YY_FATAL_ERROR( "flex scanner push-back overflow" ); + } + + *--yy_cp = (char) c; + + yyg->yytext_ptr = yy_bp; + yyg->yy_hold_char = *yy_cp; + yyg->yy_c_buf_p = yy_cp; +} + +#ifndef YY_NO_INPUT +#ifdef __cplusplus + static int yyinput (yyscan_t yyscanner) +#else + static int input (yyscan_t yyscanner) +#endif + +{ + int c; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + + *yyg->yy_c_buf_p = yyg->yy_hold_char; + + if ( *yyg->yy_c_buf_p == 
YY_END_OF_BUFFER_CHAR ) + { + /* yy_c_buf_p now points to the character we want to return. + * If this occurs *before* the EOB characters, then it's a + * valid NUL; if not, then we've hit the end of the buffer. + */ + if ( yyg->yy_c_buf_p < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] ) + /* This was really a NUL. */ + *yyg->yy_c_buf_p = '\0'; + + else + { /* need more input */ + int offset = yyg->yy_c_buf_p - yyg->yytext_ptr; + ++yyg->yy_c_buf_p; + + switch ( yy_get_next_buffer( yyscanner ) ) + { + case EOB_ACT_LAST_MATCH: + /* This happens because yy_g_n_b() + * sees that we've accumulated a + * token and flags that we need to + * try matching the token before + * proceeding. But for input(), + * there's no matching to consider. + * So convert the EOB_ACT_LAST_MATCH + * to EOB_ACT_END_OF_FILE. + */ + + /* Reset buffer status. */ + yyrestart(yyin ,yyscanner); + + /*FALLTHROUGH*/ + + case EOB_ACT_END_OF_FILE: + { + if ( yywrap(yyscanner ) ) + return EOF; + + if ( ! yyg->yy_did_buffer_switch_on_eof ) + YY_NEW_FILE; +#ifdef __cplusplus + return yyinput(yyscanner); +#else + return input(yyscanner); +#endif + } + + case EOB_ACT_CONTINUE_SCAN: + yyg->yy_c_buf_p = yyg->yytext_ptr + offset; + break; + } + } + } + + c = *(unsigned char *) yyg->yy_c_buf_p; /* cast for 8-bit char's */ + *yyg->yy_c_buf_p = '\0'; /* preserve yytext */ + yyg->yy_hold_char = *++yyg->yy_c_buf_p; + + YY_CURRENT_BUFFER_LVALUE->yy_at_bol = (c == '\n'); + + return c; +} +#endif /* ifndef YY_NO_INPUT */ + +/** Immediately switch to a different input stream. + * @param input_file A readable stream. + * @param yyscanner The scanner object. + * @note This function does not reset the start condition to @c INITIAL . + */ + void yyrestart (FILE * input_file , yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + + if ( ! 
YY_CURRENT_BUFFER ){ + yyensure_buffer_stack (yyscanner); + YY_CURRENT_BUFFER_LVALUE = + yy_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); + } + + yy_init_buffer(YY_CURRENT_BUFFER,input_file ,yyscanner); + yy_load_buffer_state(yyscanner ); +} + +/** Switch to a different input buffer. + * @param new_buffer The new input buffer. + * @param yyscanner The scanner object. + */ + void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + + /* TODO. We should be able to replace this entire function body + * with + * yypop_buffer_state(); + * yypush_buffer_state(new_buffer); + */ + yyensure_buffer_stack (yyscanner); + if ( YY_CURRENT_BUFFER == new_buffer ) + return; + + if ( YY_CURRENT_BUFFER ) + { + /* Flush out information for old buffer. */ + *yyg->yy_c_buf_p = yyg->yy_hold_char; + YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = yyg->yy_c_buf_p; + YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars; + } + + YY_CURRENT_BUFFER_LVALUE = new_buffer; + yy_load_buffer_state(yyscanner ); + + /* We don't actually know whether we did this switch during + * EOF (yywrap()) processing, but the only time this flag + * is looked at is after yywrap() is called, so it's safe + * to go ahead and always set it. + */ + yyg->yy_did_buffer_switch_on_eof = 1; +} + +static void yy_load_buffer_state (yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars; + yyg->yytext_ptr = yyg->yy_c_buf_p = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos; + yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file; + yyg->yy_hold_char = *yyg->yy_c_buf_p; +} + +/** Allocate and initialize an input buffer state. + * @param file A readable stream. + * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE. + * @param yyscanner The scanner object. + * @return the allocated buffer state. 
+ */ + YY_BUFFER_STATE yy_create_buffer (FILE * file, int size , yyscan_t yyscanner) +{ + YY_BUFFER_STATE b; + + b = (YY_BUFFER_STATE) yyalloc(sizeof( struct yy_buffer_state ) ,yyscanner ); + if ( ! b ) + YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" ); + + b->yy_buf_size = size; + + /* yy_ch_buf has to be 2 characters longer than the size given because + * we need to put in 2 end-of-buffer characters. + */ + b->yy_ch_buf = (char *) yyalloc(b->yy_buf_size + 2 ,yyscanner ); + if ( ! b->yy_ch_buf ) + YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" ); + + b->yy_is_our_buffer = 1; + + yy_init_buffer(b,file ,yyscanner); + + return b; +} + +/** Destroy the buffer. + * @param b a buffer created with yy_create_buffer() + * @param yyscanner The scanner object. + */ + void yy_delete_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + + if ( ! b ) + return; + + if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */ + YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0; + + if ( b->yy_is_our_buffer ) + yyfree((void *) b->yy_ch_buf ,yyscanner ); + + yyfree((void *) b ,yyscanner ); +} + +#ifndef __cplusplus +extern int isatty (int ); +#endif /* __cplusplus */ + +/* Initializes or reinitializes a buffer. + * This function is sometimes called more than once on the same buffer, + * such as during a yyrestart() or at EOF. + */ + static void yy_init_buffer (YY_BUFFER_STATE b, FILE * file , yyscan_t yyscanner) + +{ + int oerrno = errno; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + + yy_flush_buffer(b ,yyscanner); + + b->yy_input_file = file; + b->yy_fill_buffer = 1; + + /* If b is the current buffer, then yy_init_buffer was _probably_ + * called from yyrestart() or through yy_get_next_buffer. + * In that case, we don't want to reset the lineno or column. + */ + if (b != YY_CURRENT_BUFFER){ + b->yy_bs_lineno = 1; + b->yy_bs_column = 0; + } + + b->yy_is_interactive = file ? 
(isatty( fileno(file) ) > 0) : 0; + + errno = oerrno; +} + +/** Discard all buffered characters. On the next scan, YY_INPUT will be called. + * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER. + * @param yyscanner The scanner object. + */ + void yy_flush_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + if ( ! b ) + return; + + b->yy_n_chars = 0; + + /* We always need two end-of-buffer characters. The first causes + * a transition to the end-of-buffer state. The second causes + * a jam in that state. + */ + b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR; + b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR; + + b->yy_buf_pos = &b->yy_ch_buf[0]; + + b->yy_at_bol = 1; + b->yy_buffer_status = YY_BUFFER_NEW; + + if ( b == YY_CURRENT_BUFFER ) + yy_load_buffer_state(yyscanner ); +} + +/** Pushes the new state onto the stack. The new state becomes + * the current state. This function will allocate the stack + * if necessary. + * @param new_buffer The new state. + * @param yyscanner The scanner object. + */ +void yypush_buffer_state (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + if (new_buffer == NULL) + return; + + yyensure_buffer_stack(yyscanner); + + /* This block is copied from yy_switch_to_buffer. */ + if ( YY_CURRENT_BUFFER ) + { + /* Flush out information for old buffer. */ + *yyg->yy_c_buf_p = yyg->yy_hold_char; + YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = yyg->yy_c_buf_p; + YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars; + } + + /* Only push if top exists. Otherwise, replace top. */ + if (YY_CURRENT_BUFFER) + yyg->yy_buffer_stack_top++; + YY_CURRENT_BUFFER_LVALUE = new_buffer; + + /* copied from yy_switch_to_buffer. */ + yy_load_buffer_state(yyscanner ); + yyg->yy_did_buffer_switch_on_eof = 1; +} + +/** Removes and deletes the top of the stack, if present. + * The next element becomes the new top. + * @param yyscanner The scanner object. 
+ */ +void yypop_buffer_state (yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + if (!YY_CURRENT_BUFFER) + return; + + yy_delete_buffer(YY_CURRENT_BUFFER ,yyscanner); + YY_CURRENT_BUFFER_LVALUE = NULL; + if (yyg->yy_buffer_stack_top > 0) + --yyg->yy_buffer_stack_top; + + if (YY_CURRENT_BUFFER) { + yy_load_buffer_state(yyscanner ); + yyg->yy_did_buffer_switch_on_eof = 1; + } +} + +/* Allocates the stack if it does not exist. + * Guarantees space for at least one push. + */ +static void yyensure_buffer_stack (yyscan_t yyscanner) +{ + int num_to_alloc; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + + if (!yyg->yy_buffer_stack) { + + /* First allocation is just for 2 elements, since we don't know if this + * scanner will even need a stack. We use 2 instead of 1 to avoid an + * immediate realloc on the next call. + */ + num_to_alloc = 1; + yyg->yy_buffer_stack = (struct yy_buffer_state**)yyalloc + (num_to_alloc * sizeof(struct yy_buffer_state*) + , yyscanner); + if ( ! yyg->yy_buffer_stack ) + YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" ); + + memset(yyg->yy_buffer_stack, 0, num_to_alloc * sizeof(struct yy_buffer_state*)); + + yyg->yy_buffer_stack_max = num_to_alloc; + yyg->yy_buffer_stack_top = 0; + return; + } + + if (yyg->yy_buffer_stack_top >= (yyg->yy_buffer_stack_max) - 1){ + + /* Increase the buffer to prepare for a possible push. */ + int grow_size = 8 /* arbitrary grow size */; + + num_to_alloc = yyg->yy_buffer_stack_max + grow_size; + yyg->yy_buffer_stack = (struct yy_buffer_state**)yyrealloc + (yyg->yy_buffer_stack, + num_to_alloc * sizeof(struct yy_buffer_state*) + , yyscanner); + if ( ! 
yyg->yy_buffer_stack ) + YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" ); + + /* zero only the new slots.*/ + memset(yyg->yy_buffer_stack + yyg->yy_buffer_stack_max, 0, grow_size * sizeof(struct yy_buffer_state*)); + yyg->yy_buffer_stack_max = num_to_alloc; + } +} + +/** Setup the input buffer state to scan directly from a user-specified character buffer. + * @param base the character buffer + * @param size the size in bytes of the character buffer + * @param yyscanner The scanner object. + * @return the newly allocated buffer state object. + */ +YY_BUFFER_STATE yy_scan_buffer (char * base, yy_size_t size , yyscan_t yyscanner) +{ + YY_BUFFER_STATE b; + + if ( size < 2 || + base[size-2] != YY_END_OF_BUFFER_CHAR || + base[size-1] != YY_END_OF_BUFFER_CHAR ) + /* They forgot to leave room for the EOB's. */ + return 0; + + b = (YY_BUFFER_STATE) yyalloc(sizeof( struct yy_buffer_state ) ,yyscanner ); + if ( ! b ) + YY_FATAL_ERROR( "out of dynamic memory in yy_scan_buffer()" ); + + b->yy_buf_size = size - 2; /* "- 2" to take care of EOB's */ + b->yy_buf_pos = b->yy_ch_buf = base; + b->yy_is_our_buffer = 0; + b->yy_input_file = 0; + b->yy_n_chars = b->yy_buf_size; + b->yy_is_interactive = 0; + b->yy_at_bol = 1; + b->yy_fill_buffer = 0; + b->yy_buffer_status = YY_BUFFER_NEW; + + yy_switch_to_buffer(b ,yyscanner ); + + return b; +} + +/** Setup the input buffer state to scan a string. The next call to yylex() will + * scan from a @e copy of @a str. + * @param yystr a NUL-terminated string to scan + * @param yyscanner The scanner object. + * @return the newly allocated buffer state object. + * @note If you want to scan bytes that may contain NUL values, then use + * yy_scan_bytes() instead. + */ +YY_BUFFER_STATE yy_scan_string (yyconst char * yystr , yyscan_t yyscanner) +{ + + return yy_scan_bytes(yystr,strlen(yystr) ,yyscanner); +} + +/** Setup the input buffer state to scan the given bytes. 
The next call to yylex() will + * scan from a @e copy of @a bytes. + * @param bytes the byte buffer to scan + * @param len the number of bytes in the buffer pointed to by @a bytes. + * @param yyscanner The scanner object. + * @return the newly allocated buffer state object. + */ +YY_BUFFER_STATE yy_scan_bytes (yyconst char * yybytes, int _yybytes_len , yyscan_t yyscanner) +{ + YY_BUFFER_STATE b; + char *buf; + yy_size_t n; + int i; + + /* Get memory for full buffer, including space for trailing EOB's. */ + n = _yybytes_len + 2; + buf = (char *) yyalloc(n ,yyscanner ); + if ( ! buf ) + YY_FATAL_ERROR( "out of dynamic memory in yy_scan_bytes()" ); + + for ( i = 0; i < _yybytes_len; ++i ) + buf[i] = yybytes[i]; + + buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR; + + b = yy_scan_buffer(buf,n ,yyscanner); + if ( ! b ) + YY_FATAL_ERROR( "bad buffer in yy_scan_bytes()" ); + + /* It's okay to grow etc. this buffer, and we should throw it + * away when we're done. + */ + b->yy_is_our_buffer = 1; + + return b; +} + +#ifndef YY_EXIT_FAILURE +#define YY_EXIT_FAILURE 2 +#endif + +static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner) +{ + (void) fprintf( stderr, "%s\n", msg ); + exit( YY_EXIT_FAILURE ); +} + +/* Redefine yyless() so it works in section 3 code. */ + +#undef yyless +#define yyless(n) \ + do \ + { \ + /* Undo effects of setting up yytext. */ \ + int yyless_macro_arg = (n); \ + YY_LESS_LINENO(yyless_macro_arg);\ + yytext[yyleng] = yyg->yy_hold_char; \ + yyg->yy_c_buf_p = yytext + yyless_macro_arg; \ + yyg->yy_hold_char = *yyg->yy_c_buf_p; \ + *yyg->yy_c_buf_p = '\0'; \ + yyleng = yyless_macro_arg; \ + } \ + while ( 0 ) + +/* Accessor methods (get/set functions) to struct members. */ + +/** Get the user-defined data for this scanner. + * @param yyscanner The scanner object. 
+ */ +YY_EXTRA_TYPE yyget_extra (yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + return yyextra; +} + +/** Get the current line number. + * @param yyscanner The scanner object. + */ +int yyget_lineno (yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + + if (! YY_CURRENT_BUFFER) + return 0; + + return yylineno; +} + +/** Get the current column number. + * @param yyscanner The scanner object. + */ +int yyget_column (yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + + if (! YY_CURRENT_BUFFER) + return 0; + + return yycolumn; +} + +/** Get the input stream. + * @param yyscanner The scanner object. + */ +FILE *yyget_in (yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + return yyin; +} + +/** Get the output stream. + * @param yyscanner The scanner object. + */ +FILE *yyget_out (yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + return yyout; +} + +/** Get the length of the current token. + * @param yyscanner The scanner object. + */ +int yyget_leng (yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + return yyleng; +} + +/** Get the current token. + * @param yyscanner The scanner object. + */ + +char *yyget_text (yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + return yytext; +} + +/** Set the user-defined data. This data is never touched by the scanner. + * @param user_defined The data to be associated with this scanner. + * @param yyscanner The scanner object. + */ +void yyset_extra (YY_EXTRA_TYPE user_defined , yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + yyextra = user_defined ; +} + +/** Set the current line number. + * @param line_number + * @param yyscanner The scanner object. 
+ */ +void yyset_lineno (int line_number , yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + + /* lineno is only valid if an input buffer exists. */ + if (! YY_CURRENT_BUFFER ) + yy_fatal_error( "yyset_lineno called with no buffer" , yyscanner); + + yylineno = line_number; +} + +/** Set the current column. + * @param line_number + * @param yyscanner The scanner object. + */ +void yyset_column (int column_no , yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + + /* column is only valid if an input buffer exists. */ + if (! YY_CURRENT_BUFFER ) + yy_fatal_error( "yyset_column called with no buffer" , yyscanner); + + yycolumn = column_no; +} + +/** Set the input stream. This does not discard the current + * input buffer. + * @param in_str A readable stream. + * @param yyscanner The scanner object. + * @see yy_switch_to_buffer + */ +void yyset_in (FILE * in_str , yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + yyin = in_str ; +} + +void yyset_out (FILE * out_str , yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + yyout = out_str ; +} + +int yyget_debug (yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + return yy_flex_debug; +} + +void yyset_debug (int bdebug , yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + yy_flex_debug = bdebug ; +} + +/* Accessor methods for yylval and yylloc */ + +/* User-visible API */ + +/* yylex_init is special because it creates the scanner itself, so it is + * the ONLY reentrant function that doesn't take the scanner as the last argument. + * That's why we explicitly handle the declaration, instead of using our macros. 
+ */ + +int yylex_init(yyscan_t* ptr_yy_globals) + +{ + if (ptr_yy_globals == NULL){ + errno = EINVAL; + return 1; + } + + *ptr_yy_globals = (yyscan_t) yyalloc ( sizeof( struct yyguts_t ), NULL ); + + if (*ptr_yy_globals == NULL){ + errno = ENOMEM; + return 1; + } + + /* By setting to 0xAA, we expose bugs in yy_init_globals. Leave at 0x00 for releases. */ + memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t)); + + return yy_init_globals ( *ptr_yy_globals ); +} + +/* yylex_init_extra has the same functionality as yylex_init, but follows the + * convention of taking the scanner as the last argument. Note however, that + * this is a *pointer* to a scanner, as it will be allocated by this call (and + * is the reason, too, why this function also must handle its own declaration). + * The user defined value in the first argument will be available to yyalloc in + * the yyextra field. + */ + +int yylex_init_extra(YY_EXTRA_TYPE yy_user_defined,yyscan_t* ptr_yy_globals ) + +{ + struct yyguts_t dummy_yyguts; + + yyset_extra (yy_user_defined, &dummy_yyguts); + + if (ptr_yy_globals == NULL){ + errno = EINVAL; + return 1; + } + + *ptr_yy_globals = (yyscan_t) yyalloc ( sizeof( struct yyguts_t ), &dummy_yyguts ); + + if (*ptr_yy_globals == NULL){ + errno = ENOMEM; + return 1; + } + + /* By setting to 0xAA, we expose bugs in + yy_init_globals. Leave at 0x00 for releases. */ + memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t)); + + yyset_extra (yy_user_defined, *ptr_yy_globals); + + return yy_init_globals ( *ptr_yy_globals ); +} + +static int yy_init_globals (yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + /* Initialization is the same as for the non-reentrant scanner. + * This function is called from yylex_destroy(), so don't allocate here. 
+ */ + + yyg->yy_buffer_stack = 0; + yyg->yy_buffer_stack_top = 0; + yyg->yy_buffer_stack_max = 0; + yyg->yy_c_buf_p = (char *) 0; + yyg->yy_init = 0; + yyg->yy_start = 0; + + yyg->yy_start_stack_ptr = 0; + yyg->yy_start_stack_depth = 0; + yyg->yy_start_stack = NULL; + +/* Defined in main.c */ +#ifdef YY_STDINIT + yyin = stdin; + yyout = stdout; +#else + yyin = (FILE *) 0; + yyout = (FILE *) 0; +#endif + + /* For future reference: Set errno on error, since we are called by + * yylex_init() + */ + return 0; +} + +/* yylex_destroy is for both reentrant and non-reentrant scanners. */ +int yylex_destroy (yyscan_t yyscanner) +{ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + + /* Pop the buffer stack, destroying each element. */ + while(YY_CURRENT_BUFFER){ + yy_delete_buffer(YY_CURRENT_BUFFER ,yyscanner ); + YY_CURRENT_BUFFER_LVALUE = NULL; + yypop_buffer_state(yyscanner); + } + + /* Destroy the stack itself. */ + yyfree(yyg->yy_buffer_stack ,yyscanner); + yyg->yy_buffer_stack = NULL; + + /* Destroy the start condition stack. */ + yyfree(yyg->yy_start_stack ,yyscanner ); + yyg->yy_start_stack = NULL; + + /* Reset the globals. This is important in a non-reentrant scanner so the next time + * yylex() is called, initialization will occur. */ + yy_init_globals( yyscanner); + + /* Destroy the main struct (reentrant only). */ + yyfree ( yyscanner , yyscanner ); + yyscanner = NULL; + return 0; +} + +/* + * Internal utility routines. 
+ */ + +#ifndef yytext_ptr +static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yyscanner) +{ + register int i; + for ( i = 0; i < n; ++i ) + s1[i] = s2[i]; +} +#endif + +#ifdef YY_NEED_STRLEN +static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner) +{ + register int n; + for ( n = 0; s[n]; ++n ) + ; + + return n; +} +#endif + +void *yyalloc (yy_size_t size , yyscan_t yyscanner) +{ + return (void *) malloc( size ); +} + +void *yyrealloc (void * ptr, yy_size_t size , yyscan_t yyscanner) +{ + /* The cast to (char *) in the following accommodates both + * implementations that use char* generic pointers, and those + * that use void* generic pointers. It works with the latter + * because both ANSI C and C++ allow castless assignment from + * any pointer type to void*, and deal with argument conversions + * as though doing an assignment. + */ + return (void *) realloc( (char *) ptr, size ); +} + +void yyfree (void * ptr , yyscan_t yyscanner) +{ + free( (char *) ptr ); /* see yyrealloc() for (char *) cast */ +} + +#define YYTABLES_NAME "yytables" + +#line 239 "psp_parser.l" + + + +/* this is for emacs +Local Variables: +mode:C +End: +*/ + diff --git a/src/psp_parser.l b/src/psp_parser.l new file mode 100644 index 0000000..aeaf4d3 --- /dev/null +++ b/src/psp_parser.l @@ -0,0 +1,246 @@ +%{ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. 
See the License for the specific language governing + * permissions and limitations under the License. + * + * + * This file originally written by Sterling Hughes. + * + */ + +/* NOTE The seemingly unusual generated Python code (sometime using + * ";" to separate statements, newline placement, etc) is such that + * for vast majority of cases the line number of the input file will + * match the line number of the output! + */ + +#include "psp_parser.h" + +#define OUTPUT_WHITESPACE(__wsstring) \ + psp_string_0((__wsstring)); \ + psp_string_append(&PSP_PG(pycode), (__wsstring)->blob) + +#define CLEAR_WHITESPACE(__wsstring) psp_string_clear((__wsstring)); + +%} + +%option noyywrap nounistd + +%x TEXT +%x PYCODE +%x INDENT +%x DIR +%x COMMENT + +%% + +\r\n|\n { + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("req.write(\"\"\"")); + + yyless(0); + BEGIN TEXT; +} + +. { + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("req.write(\"\"\"")); + + yyless(0); + BEGIN TEXT; +} + +"\\n" { + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("\\\\n")); +} + +"\\r" { + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("\\\\r")); +} + +"\\t" { + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("\\\\t")); +} + +"<%=" { /* expression */ + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("\"\"\",0); req.write(str(")); + PSP_PG(is_psp_echo) = 1; + + BEGIN PYCODE; +} + +"<%" { /* python code */ + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("\"\"\",0);")); + CLEAR_WHITESPACE(&PSP_PG(whitespace)); + PSP_PG(seen_newline) = 0; + BEGIN PYCODE; +} + +"<%@" { /* directive */ + BEGIN DIR; +} + +"<%--" { /* comment */ + BEGIN COMMENT; +} + +\r\n|\n { + psp_string_appendc(&PSP_PG(pycode), '\n'); +} + +. 
{ + if (yytext[0] == '"') { + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("\\\"")); + } else { + psp_string_appendc(&PSP_PG(pycode), yytext[0]); + } +} + +<> { + yypop_buffer_state(yyscanner); + if (!YY_CURRENT_BUFFER) { + /* this is really the end */ + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("\"\"\",0)\n")); + yyterminate(); + } + else { + /* we are inside include, continue scanning */ + BEGIN DIR; + } +} + +\r\n|\n|\r { + psp_string_appendc(&PSP_PG(pycode), '\n'); + + PSP_PG(seen_newline) = 1; + BEGIN INDENT; +} + +"%>" { + + if (PSP_PG(is_psp_echo)) { + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("),0); req.write(\"\"\"")); + PSP_PG(is_psp_echo) = 0; + } + else { + if (!PSP_PG(seen_newline)) { + /* this will happen is you have <%%> */ + psp_string_appendc(&PSP_PG(pycode), ';'); + } + + if (PSP_PG(after_colon)) { + /* this is dumb mistake-proof measure, if %> + is immediately following where there should be an indent */ + psp_string_appendc(&PSP_PG(whitespace), '\t'); + PSP_PG(after_colon) = 0; + } + OUTPUT_WHITESPACE(&PSP_PG(whitespace)); + psp_string_appendl(&PSP_PG(pycode), STATIC_STR("req.write(\"\"\"")); + } + + BEGIN TEXT; +} + +":" { + psp_string_appendc(&PSP_PG(pycode), yytext[0]); + PSP_PG(after_colon) = 1; +} + +. { + psp_string_appendc(&PSP_PG(pycode), yytext[0]); + PSP_PG(after_colon) = 0; +} + +^[\t ]* { + + CLEAR_WHITESPACE(&PSP_PG(whitespace)); + psp_string_appendl(&PSP_PG(whitespace), yytext, yyleng); + psp_string_appendl(&PSP_PG(pycode), yytext, yyleng); + + BEGIN PYCODE; +} + +"%>" { + yyless(0); + BEGIN PYCODE; +} + +\r\n|\n { + CLEAR_WHITESPACE(&PSP_PG(whitespace)); + yyless(0); + BEGIN PYCODE; +} + +. 
{ + CLEAR_WHITESPACE(&PSP_PG(whitespace)); + yyless(0); + BEGIN PYCODE; +} + +"include"[ ]+"file"[ ]?=[ ]?"\""[^ ]+"\"" { + + char *filename; + char *path; + FILE *f; + + /* find a quote */ + filename = strchr(yytext, '"') + 1; + filename[strchr(filename, '"')-filename] = '\0'; + + /* XXX The absolute path check won't work on Windows, + * needs to be corrected + */ + + if (PSP_PG(dir) && filename[0] != '/') { + path = malloc(strlen(filename)+strlen(PSP_PG(dir))+1); + if (path == NULL) { + PyErr_NoMemory(); + yyterminate(); + } + strcpy(path, PSP_PG(dir)); + strcat(path, filename); + } + else { + path = filename; + } + + Py_BEGIN_ALLOW_THREADS + f = fopen(path, "rb"); + Py_END_ALLOW_THREADS + if (f == NULL) { + PyErr_SetFromErrnoWithFilename(PyExc_IOError, path); + } + else { + yypush_buffer_state(yy_create_buffer(f, YY_BUF_SIZE, yyscanner), + yyscanner); + BEGIN(TEXT); + } + + if (PSP_PG(dir)) free(path); +} + +"%>" { + BEGIN TEXT; +} + +"--%>" { + BEGIN TEXT; +} + +%% + +/* this is for emacs +Local Variables: +mode:C +End: +*/ diff --git a/src/psp_string.c b/src/psp_string.c new file mode 100644 index 0000000..76815f5 --- /dev/null +++ b/src/psp_string.c @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. 
+ * + * + * + * See accompanying documentation and source code comments + * for details. + * + */ + +#include "psp_string.h" + +#define psp_string_alloc(__pspstring, __length) \ + if ((__length) > (__pspstring)->allocated) { \ + (__pspstring)->blob = realloc((__pspstring)->blob, (__length) + PSP_STRING_BLOCK); \ + (__pspstring)->allocated = (__length) + PSP_STRING_BLOCK; \ + } + +void +psp_string_0(psp_string *s) +{ + if (!s->length) { + return; + } + + s->blob[s->length] = '\0'; +} + +void +psp_string_appendl(psp_string *s, char *text, size_t length) +{ + int newlen = s->length + length; + + if (text == NULL) { + return; + } + + psp_string_alloc(s, newlen); + memcpy(s->blob + s->length, text, length); + s->length = newlen; +} + +void +psp_string_append(psp_string *s, char *text) +{ + if (text == NULL) { + return; + } + psp_string_appendl(s, text, strlen(text)); +} + +void +psp_string_appendc(psp_string *s, char c) +{ + int newlen = s->length + 1; + + psp_string_alloc(s, newlen); + s->blob[s->length] = c; + s->length = newlen; +} + +void +psp_string_clear(psp_string *s) +{ + memset(s->blob, 0, s->length); + s->length = 0; +} + +void +psp_string_free(psp_string *s) +{ + free(s->blob); + s->blob = NULL; + s->length = 0; + s->allocated = 0; +} diff --git a/src/requestobject.c b/src/requestobject.c new file mode 100644 index 0000000..035a06b --- /dev/null +++ b/src/requestobject.c @@ -0,0 +1,2433 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. 
You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. + * + * + * requestobject.c + * + * + */ + +#include "mod_python.h" + +/* mod_ssl.h is not safe for inclusion in 2.0, so duplicate the + * optional function declarations. */ +APR_DECLARE_OPTIONAL_FN(char *, ssl_var_lookup, + (apr_pool_t *, server_rec *, + conn_rec *, request_rec *, + char *)); +APR_DECLARE_OPTIONAL_FN(int, ssl_is_https, (conn_rec *)); + +/* Optional functions imported from mod_ssl when loaded: */ +static APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *optfn_ssl_var_lookup = NULL; +static APR_OPTIONAL_FN_TYPE(ssl_is_https) *optfn_is_https = NULL; + +/** + ** MpRequest_FromRequest + ** + * This routine creates a Python requestobject given an Apache + * request_rec pointer. + * + */ + +PyObject * MpRequest_FromRequest(request_rec *req) +{ + requestobject *result; + MpRequest_Type.ob_type = &PyType_Type; + result = PyObject_GC_New(requestobject, &MpRequest_Type); + if (! 
result) + return PyErr_NoMemory(); + + result->dict = PyDict_New(); + if (!result->dict) + return PyErr_NoMemory(); + result->request_rec = req; + result->connection = NULL; + result->server = NULL; + result->headers_in = NULL; + result->headers_out = NULL; + result->err_headers_out = NULL; + result->subprocess_env = NULL; + result->notes = NULL; + result->phase = NULL; + result->config = NULL; + result->options = NULL; + result->extension = NULL; + result->content_type_set = 0; + result->bytes_queued = 0; + result->hlo = NULL; + result->rbuff = NULL; + result->rbuff_pos = 0; + result->rbuff_len = 0; + + /* we make sure that the object dictionary is there + * before registering the object with the GC + */ + PyObject_GC_Track(result); + + return (PyObject *) result; +} + + +/* Methods */ + +/** + ** request.add_common_vars(reqeust self) + ** + * Interface to ap_add_common_vars. Adds a some more of CGI + * environment variables to subprocess_env. + * + */ + +static PyObject * req_add_common_vars(requestobject *self) +{ + + ap_add_common_vars(self->request_rec); + + Py_INCREF(Py_None); + return Py_None; + +} + +/** + ** request.add_cgi_vars(reqeust self) + ** + * This is a clone of ap_add_cgi_vars which does not bother + * calculating PATH_TRANSLATED and thus avoids creating + * sub-requests and filesystem calls. + * + */ + +static PyObject * req_add_cgi_vars(requestobject *self) +{ + + request_rec *r = self->request_rec; + apr_table_t *e = r->subprocess_env; + + apr_table_setn(e, "GATEWAY_INTERFACE", "CGI/1.1"); + apr_table_setn(e, "SERVER_PROTOCOL", r->protocol); + apr_table_setn(e, "REQUEST_METHOD", r->method); + apr_table_setn(e, "QUERY_STRING", r->args ? 
r->args : ""); + apr_table_setn(e, "REQUEST_URI", r->uri); + + if (!r->path_info || !*r->path_info) { + apr_table_setn(e, "SCRIPT_NAME", r->uri); + } + else { + int path_info_start = ap_find_path_info(r->uri, r->path_info); + + apr_table_setn(e, "SCRIPT_NAME", + apr_pstrndup(r->pool, r->uri, path_info_start)); + + apr_table_setn(e, "PATH_INFO", r->path_info); + } + + ap_add_common_vars(self->request_rec); + + Py_INCREF(Py_None); + return Py_None; + +} + + +/** + ** set_wsgi_path_info(self) + ** + * Set path_info the way wsgi likes it. + * Return: 0 == OK, 1 == bad base_uri, 2 == base_uri mismatch + */ + +static int set_wsgi_path_info(requestobject *self) +{ + + py_config *conf = + (py_config *) ap_get_module_config(self->request_rec->per_dir_config, + &python_module); + const char *path_info = self->request_rec->uri; + const char *base_uri = apr_table_get(conf->options, "mod_python.wsgi.base_uri"); + + if (!base_uri && conf->d_is_location) { + + /* Use Location as the base_uri, automatically adjust trailing slash */ + + char *bu = apr_pstrdup(self->request_rec->pool, conf->config_dir); + int last = strlen(bu) - 1; + if (*bu && bu[last] == '/') + bu[last] = '\0'; + + base_uri = bu; + + } else if (base_uri && *base_uri) { + + /* This base_uri was set by hand, enforce correctness */ + + if (base_uri[strlen(base_uri)-1] == '/') { + PyErr_SetString(PyExc_ValueError, + apr_psprintf(self->request_rec->pool, + "PythonOption 'mod_python.wsgi.base_uri' ('%s') must not end with '/'", + base_uri)); + return 1; + } + } + + if (base_uri && *base_uri) { + + /* find end of base_uri match in r->uri, this will be our path_info */ + while (*path_info && *base_uri && (*path_info == *base_uri)) { + path_info++; + base_uri++; + } + + if (*base_uri) { + /* we have not reached end of base_uri, therefore + r->uri does not start with base_uri */ + return 2; + } + } + + self->request_rec->path_info = apr_pstrdup(self->request_rec->pool, path_info); + return 0; +} + +/** + ** 
request.build_wsgi_env(request self) + ** + * Build a WSGI environment dictionary. + * + */ +/* these things never change and we never decref them */ +static PyObject *wsgi_version = NULL; +static PyObject *wsgi_multithread = NULL; +static PyObject *wsgi_multiprocess = NULL; + +static PyObject *req_build_wsgi_env(requestobject *self) +{ + + request_rec *r = self->request_rec; + apr_table_t *e = r->subprocess_env; + PyObject *env, *v; + const char *val; + int i, j, rc; + + env = PyDict_New(); + if (!env) + return NULL; + + rc = set_wsgi_path_info(self); + if (rc == 1) { + /* bad base_uri, the error is already set */ + Py_DECREF(env); + return NULL; + } else if (rc == 2) { + /* base_uri does not match uri, wsgi.py will decline */ + Py_DECREF(env); + Py_INCREF(Py_None); + return Py_None; + } + + /* this will create the correct SCRIPT_NAME based on our path_info now */ + req_add_cgi_vars(self); + + /* copy r->subprocess_env */ + if (!self->subprocess_env) + self->subprocess_env = MpTable_FromTable(self->request_rec->subprocess_env); + else + ((tableobject*)self->subprocess_env)->table = r->subprocess_env; + PyDict_Merge(env, (PyObject*)self->subprocess_env, 0); + + /* authorization */ + if ((val = apr_table_get(r->headers_in, "authorization"))) { + v = MpBytesOrUnicode_FromString(val); + PyDict_SetItemString(env, "HTTP_AUTHORIZATION", v); + Py_DECREF(v); + } + + PyDict_SetItemString(env, "wsgi.input", (PyObject *) self); + PyDict_SetItemString(env, "wsgi.errors", PySys_GetObject("stderr")); + + if (!wsgi_version) { + int result; + wsgi_version = Py_BuildValue("(ii)", 1, 0); + + ap_mpm_query(AP_MPMQ_IS_THREADED, &result); + wsgi_multithread = PyBool_FromLong(result); + + ap_mpm_query(AP_MPMQ_IS_FORKED, &result); + wsgi_multiprocess = PyBool_FromLong(result); + } + + /* NOTE: these are global vars which we never decref! 
*/ + PyDict_SetItemString(env, "wsgi.version", wsgi_version); + PyDict_SetItemString(env, "wsgi.multithread", wsgi_multithread); + PyDict_SetItemString(env, "wsgi.multiprocess", wsgi_multiprocess); + + val = apr_table_get(r->subprocess_env, "HTTPS"); + if (!val || !strcasecmp(val, "off")) { + v = MpBytesOrUnicode_FromString("http"); + PyDict_SetItemString(env, "wsgi.url_scheme", v); + Py_DECREF(v); + } else { + v = MpBytesOrUnicode_FromString("https"); + PyDict_SetItemString(env, "wsgi.url_scheme", v); + Py_DECREF(v); + } + + return env; +} + +/** + ** request.wsgi_start_response(self, args) + ** + * The WSGI start_response() + * + */ +static PyObject *req_wsgi_start_response(requestobject *self, PyObject *args) +{ + + char *status_line = NULL; + PyObject *headers = NULL; + PyObject *exc_info = NULL; + int status, i; + + if (! PyArg_ParseTuple(args, "sO|O:wsgi_start_response", &status_line, &headers, &exc_info)) + return NULL; + + if (!PyList_Check(headers)) { + PyErr_Format(PyExc_TypeError, "headers argument must be a list, not a '%.200s'", + headers->ob_type->tp_name); + return NULL; + } + + /* I don't understand what PEP3333 wants us to do with the + * exception, we just re-raise it like other WSGI tools do */ + if (exc_info) { + PyObject *exc, *value, *tb; + if (PyArg_UnpackTuple(exc_info, "wsgi_start_response", 3, 3, &exc, &value, &tb)) { + Py_INCREF(exc); + Py_INCREF(value); + Py_INCREF(tb); + PyErr_Restore(exc, value, tb); + } + return NULL; + } + + /* add the headers */ + for (i=0; i < PyList_Size(headers); i++) { + PyObject *key = NULL, *val = NULL; + char *k, *v; + PyObject *item = PyList_GetItem(headers, i); + + if (!PyTuple_CheckExact(item)) { + PyErr_Format(PyExc_TypeError, "each header must be a tuple, not a '%.200s'", + item->ob_type->tp_name); + return NULL; + } + + if (! 
PyArg_ParseTuple(item, "OO", &key, &val)) + return NULL; + + if (!((PyUnicode_CheckExact(key) || PyBytes_CheckExact(key)))) { + PyErr_Format(PyExc_TypeError, "header names must be strings, not '%.200s'", + key->ob_type->tp_name); + return NULL; + } + + if (!((PyUnicode_CheckExact(val) || PyBytes_CheckExact(val)))) { + PyErr_Format(PyExc_TypeError, "header values must be strings, not '%.200s'", + val->ob_type->tp_name); + return NULL; + } + + MP_ANYSTR_AS_STR(k, key, 1); + MP_ANYSTR_AS_STR(v, val, 1); + if ((!k) || (!v)) { + Py_DECREF(key); /* MP_ANYSTR_AS_STR */ + Py_DECREF(val); /* MP_ANYSTR_AS_STR */ + return NULL; + } + + apr_table_add(self->request_rec->headers_out, k, v); + + Py_DECREF(key); /* MP_ANYSTR_AS_STR */ + Py_DECREF(val); /* MP_ANYSTR_AS_STR */ + } + + status = atoi(status_line); + if (!ap_is_HTTP_VALID_RESPONSE(status)) { + PyErr_SetString(PyExc_ValueError, + apr_psprintf(self->request_rec->pool, + "Invalid status line: %s", status_line)); + return NULL; + } + + self->request_rec->status_line = apr_pstrdup(self->request_rec->pool, status_line); + + return PyObject_GetAttrString((PyObject*)self, "write"); +} + +/** + ** valid_phase() + ** + * utility func - makes sure a phase is valid + */ + +static int valid_phase(const char *p) +{ + if ((strcmp(p, "PythonHandler") != 0) && + (strcmp(p, "PythonAuthenHandler") != 0) && + (strcmp(p, "PythonPostReadRequestHandler") != 0) && + (strcmp(p, "PythonTransHandler") != 0) && + (strcmp(p, "PythonHeaderParserHandler") != 0) && + (strcmp(p, "PythonAccessHandler") != 0) && + (strcmp(p, "PythonAuthzHandler") != 0) && + (strcmp(p, "PythonTypeHandler") != 0) && + (strcmp(p, "PythonFixupHandler") != 0) && + (strcmp(p, "PythonLogHandler") != 0) && + (strcmp(p, "PythonInitHandler") != 0)) + return 0; + else + return 1; +} + +/** + ** request.add_handler(request self, string phase, string handler) + ** + * Allows to add another handler to the handler list. 
+ */ + +static PyObject *req_add_handler(requestobject *self, PyObject *args) +{ + char *phase = NULL; + PyObject *o_phase; + char *handler; + const char *dir = NULL; + const char *currphase; + + if (! PyArg_ParseTuple(args, "ss|z", &phase, &handler, &dir)) + return NULL; + + if (! valid_phase(phase)) { + PyErr_SetString(PyExc_IndexError, + apr_psprintf(self->request_rec->pool, + "Invalid phase: %s", phase)); + return NULL; + } + + /* Canonicalize path and add trailing slash at + * this point if directory was provided. */ + + if (dir) { + + char *newpath = 0; + apr_status_t rv; + + rv = apr_filepath_merge(&newpath, NULL, dir, + APR_FILEPATH_TRUENAME, + self->request_rec->pool); + + /* If there is a failure, use the original path + * which was supplied. */ + + if (rv == APR_SUCCESS || rv == APR_EPATHWILD) { + dir = newpath; + if (dir[strlen(dir) - 1] != '/') { + dir = apr_pstrcat(self->request_rec->pool, dir, "/", NULL); + } + } + else { + /* dir is from Python, so duplicate it */ + + dir = apr_pstrdup(self->request_rec->pool, dir); + } + } + + /* handler is from Python, so duplicate it */ + + handler = apr_pstrdup(self->request_rec->pool, handler); + + /* which phase are we processing? */ + o_phase = self->phase; + MP_ANYSTR_AS_STR(currphase, o_phase, 1); + + /* are we in same phase as what's being added? */ + if (strcmp(currphase, phase) == 0) { + + /* then just append to hlist */ + hlist_append(self->request_rec->pool, self->hlo->head, + handler, dir, 0, 0, NULL, NOTSILENT); + } + else { + /* this is a phase that we're not in */ + + py_req_config *req_config; + hl_entry *hle; + + /* get request config */ + req_config = (py_req_config *) + ap_get_module_config(self->request_rec->request_config, + &python_module); + + hle = apr_hash_get(req_config->dynhls, phase, APR_HASH_KEY_STRING); + + if (! 
hle) { + hle = hlist_new(self->request_rec->pool, handler, dir, 0, 0, NULL, NOTSILENT); + apr_hash_set(req_config->dynhls, phase, APR_HASH_KEY_STRING, hle); + } + else { + hlist_append(self->request_rec->pool, hle, handler, dir, 0, 0, NULL, NOTSILENT); + } + } + + Py_XDECREF(o_phase); + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** request.add_input_filter(request self, string name) + ** + * Specifies that a pre registered filter be added to input filter chain. + */ + +static PyObject *req_add_input_filter(requestobject *self, PyObject *args) +{ + char *name; + py_req_config *req_config; + python_filter_ctx *ctx; + + if (! PyArg_ParseTuple(args, "s", &name)) + return NULL; + + req_config = (py_req_config *) ap_get_module_config( + self->request_rec->request_config, &python_module); + + if (apr_hash_get(req_config->in_filters, name, APR_HASH_KEY_STRING)) { + ctx = (python_filter_ctx *) apr_pcalloc(self->request_rec->pool, + sizeof(python_filter_ctx)); + ctx->name = apr_pstrdup(self->request_rec->pool, name); + + ap_add_input_filter(FILTER_NAME, ctx, self->request_rec, + self->request_rec->connection); + } else { + ap_add_input_filter(name, NULL, self->request_rec, + self->request_rec->connection); + } + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** request.add_output_filter(request self, string name) + ** + * Specifies that a pre registered filter be added to output filter chain. + */ + +static PyObject *req_add_output_filter(requestobject *self, PyObject *args) +{ + char *name; + py_req_config *req_config; + python_filter_ctx *ctx; + + if (! 
PyArg_ParseTuple(args, "s", &name)) + return NULL; + + req_config = (py_req_config *) ap_get_module_config( + self->request_rec->request_config, &python_module); + + if (apr_hash_get(req_config->out_filters, name, APR_HASH_KEY_STRING)) { + ctx = (python_filter_ctx *) apr_pcalloc(self->request_rec->pool, + sizeof(python_filter_ctx)); + ctx->name = apr_pstrdup(self->request_rec->pool, name); + + ap_add_output_filter(FILTER_NAME, ctx, self->request_rec, + self->request_rec->connection); + } else { + ap_add_output_filter(name, NULL, self->request_rec, + self->request_rec->connection); + } + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** request.register_input_filter(request self, string name, string handler, list dir) + ** + * Registers an input filter active for life of the request. + */ + +static PyObject *req_register_input_filter(requestobject *self, PyObject *args) +{ + char *name; + char *handler; + char *dir = NULL; + py_req_config *req_config; + py_handler *fh; + + if (! PyArg_ParseTuple(args, "ss|s", &name, &handler, &dir)) + return NULL; + + req_config = (py_req_config *) ap_get_module_config( + self->request_rec->request_config, &python_module); + + fh = (py_handler *) apr_pcalloc(self->request_rec->pool, + sizeof(py_handler)); + fh->handler = apr_pstrdup(self->request_rec->pool, handler); + + /* Canonicalize path and add trailing slash at + * this point if directory was provided. */ + + if (dir) { + + char *newpath = 0; + apr_status_t rv; + + rv = apr_filepath_merge(&newpath, NULL, dir, + APR_FILEPATH_TRUENAME, + self->request_rec->pool); + + /* If there is a failure, use the original path + * which was supplied. 
*/ + + if (rv == APR_SUCCESS || rv == APR_EPATHWILD) { + dir = newpath; + if (dir[strlen(dir) - 1] != '/') { + dir = apr_pstrcat(self->request_rec->pool, dir, "/", NULL); + } + fh->directory = dir; + } else { + fh->directory = apr_pstrdup(self->request_rec->pool, dir); + } + } + + apr_hash_set(req_config->in_filters, + apr_pstrdup(self->request_rec->pool, name), + APR_HASH_KEY_STRING, fh); + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** request.register_output_filter(request self, string name, string handler, list dir) + ** + * Registers an output filter active for life of the request. + */ + +static PyObject *req_register_output_filter(requestobject *self, PyObject *args) +{ + char *name; + char *handler; + char *dir = NULL; + py_req_config *req_config; + py_handler *fh; + + if (! PyArg_ParseTuple(args, "ss|s", &name, &handler, &dir)) + return NULL; + + req_config = (py_req_config *) ap_get_module_config( + self->request_rec->request_config, &python_module); + + fh = (py_handler *) apr_pcalloc(self->request_rec->pool, + sizeof(py_handler)); + fh->handler = apr_pstrdup(self->request_rec->pool, handler); + + /* Canonicalize path and add trailing slash at + * this point if directory was provided. */ + + if (dir) { + + char *newpath = 0; + apr_status_t rv; + + rv = apr_filepath_merge(&newpath, NULL, dir, + APR_FILEPATH_TRUENAME, + self->request_rec->pool); + + /* If there is a failure, use the original path + * which was supplied. 
*/ + + if (rv == APR_SUCCESS || rv == APR_EPATHWILD) { + dir = newpath; + if (dir[strlen(dir) - 1] != '/') { + dir = apr_pstrcat(self->request_rec->pool, dir, "/", NULL); + } + fh->directory = dir; + } else { + fh->directory = apr_pstrdup(self->request_rec->pool, dir); + } + } + + apr_hash_set(req_config->out_filters, + apr_pstrdup(self->request_rec->pool, name), + APR_HASH_KEY_STRING, fh); + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** request.allow_methods(request self, list methods, reset=0) + ** + * a wrapper around ap_allow_methods. (used for the "allow:" header + * to be passed to client when needed.) + */ + +static PyObject *req_allow_methods(requestobject *self, PyObject *args) +{ + + PyObject *methods; + int reset = 0; + int len, i; + + if (! PyArg_ParseTuple(args, "O|i", &methods, &reset)) + return NULL; + + if (! PySequence_Check(methods)){ + PyErr_SetString(PyExc_TypeError, + "First argument must be a sequence"); + return NULL; + } + + len = PySequence_Length(methods); + + if (len) { + + PyObject *method; + char *m; + + method = PySequence_GetItem(methods, 0); + + MP_ANYSTR_AS_STR(m, method, 1); + if (!m) { + Py_DECREF(method); /* MP_ANYSTR_AS_STR */ + return NULL; + } + ap_allow_methods(self->request_rec, (reset == REPLACE_ALLOW), m, NULL); + Py_DECREF(method); /* MP_ANYSTR_AS_STR */ + + for (i = 1; i < len; i++) { + method = PySequence_GetItem(methods, i); + MP_ANYSTR_AS_STR(m, method, 1); + if (!m) { + Py_DECREF(method); /* MP_ANYSTR_AS_STR */ + return NULL; + } + + ap_allow_methods(self->request_rec, MERGE_ALLOW, m, NULL); + Py_DECREF(method); /* MP_ANYSTR_AS_STR */ + } + } + + Py_INCREF(Py_None); + return Py_None; +} + + +/** + ** request.is_https(self) + ** + * mod_ssl ssl_is_https() wrapper + */ + +static PyObject * req_is_https(requestobject *self) +{ + int is_https; + + if (!optfn_is_https) + optfn_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); + + is_https = optfn_is_https && optfn_is_https(self->request_rec->connection); + + 
return PyLong_FromLong(is_https); +} + + +/** + ** request.ssl_var_lookup(self, string variable_name) + ** + * mod_ssl ssl_var_lookup() wrapper + */ + +static PyObject * req_ssl_var_lookup(requestobject *self, PyObject *args) +{ + char *var_name; + + if (! PyArg_ParseTuple(args, "s", &var_name)) + return NULL; /* error */ + + if (!optfn_ssl_var_lookup) + optfn_ssl_var_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup); + + if (optfn_ssl_var_lookup) { + const char *val; + val = optfn_ssl_var_lookup(self->request_rec->pool, + self->request_rec->server, + self->request_rec->connection, + self->request_rec, + var_name); + if (val) + return MpBytesOrUnicode_FromString(val); + } + + /* variable not found, or mod_ssl is not loaded */ + Py_INCREF(Py_None); + return Py_None; +} + + +/** + ** request.document_root(self) + ** + * ap_docuement_root wrapper + */ + +static PyObject *req_document_root(requestobject *self) +{ + return MpBytesOrUnicode_FromString(ap_document_root(self->request_rec)); +} + +/** + ** request.get_basic_auth_pw(request self) + ** + * get basic authentication password, + * similar to ap_get_basic_auth_pw + */ + +static PyObject * req_get_basic_auth_pw(requestobject *self, PyObject *args) +{ + const char *pw; + request_rec *req; + + /* http://stackoverflow.com/questions/702629/utf-8-characters-mangled-in-http-basic-auth-username/703341#703341 */ + /* Latin1 is Safari, Chrome and Mozilla - otherwise it can be decoded manually */ + + req = self->request_rec; + + if (! 
ap_get_basic_auth_pw(req, &pw)) { +#if PY_MAJOR_VERSION < 3 + return PyBytes_FromString(pw); +#else + return PyUnicode_DecodeLatin1(pw, strlen(pw), NULL); +#endif + } else { + Py_INCREF(Py_None); + return Py_None; + } +} + +/** + ** request.auth_name(self) + ** + * ap_auth_name wrapper + */ + +static PyObject *req_auth_name(requestobject *self) +{ + const char *auth_name = ap_auth_name(self->request_rec); + + if (!auth_name) { + Py_INCREF(Py_None); + return Py_None; + } + return MpBytesOrUnicode_FromString(auth_name); +} + +/** + ** request.auth_type(self) + ** + * ap_auth_type wrapper + */ + +static PyObject *req_auth_type(requestobject *self) +{ + const char *auth_type = ap_auth_type(self->request_rec); + + if (!auth_type) { + Py_INCREF(Py_None); + return Py_None; + } + + return MpBytesOrUnicode_FromString(auth_type); +} + +/** + ** request.construct_url(self) + ** + * ap_construct_url wrapper + */ + +static PyObject *req_construct_url(requestobject *self, PyObject *args) +{ + char *uri; + + if (! PyArg_ParseTuple(args, "s", &uri)) + return NULL; + + return MpBytesOrUnicode_FromString(ap_construct_url(self->request_rec->pool, + uri, self->request_rec)); +} + +/** + ** request.discard_request_body(request self) + ** + * discard content supplied with request + */ + +static PyObject * req_discard_request_body(requestobject *self) +{ + return PyLong_FromLong(ap_discard_request_body(self->request_rec)); +} + +/** + ** request.get_addhandler_exts(request self) + ** + * Returns file extentions that were given as argument to AddHandler mod_mime + * directive, if any, if at all. This is useful for the Publisher, which can + * chop off file extentions for modules based on this info. + * + * XXX Due to the way this is implemented, it is best stay undocumented. 
+ */ + +static PyObject * req_get_addhandler_exts(requestobject *self, PyObject *args) +{ + + char *exts = get_addhandler_extensions(self->request_rec); + + if (exts) + return MpBytesOrUnicode_FromString(exts); + else + return MpBytesOrUnicode_FromString(""); +} + +/** + ** request.get_config(request self) + ** + * Returns the config directives set through Python* apache directives. + * except for Python*Handler and PythonOption (which you get via get_options). + */ + +static PyObject * req_get_config(requestobject *self) +{ + py_config *conf = + (py_config *) ap_get_module_config(self->request_rec->per_dir_config, + &python_module); + if (!self->config) + self->config = MpTable_FromTable(conf->directives); + + if (((tableobject*)self->config)->table != conf->directives) + ((tableobject*)self->config)->table = conf->directives; + + Py_INCREF(self->config); + return self->config; +} + +/** + ** request.get_remodte_host(request self, [int type]) + ** + * An interface to the ap_get_remote_host function. + */ + +static PyObject * req_get_remote_host(requestobject *self, PyObject *args) +{ + + int type = REMOTE_NAME; + PyObject *str_is_ip = Py_None; + int _str_is_ip; + const char *host; + + if (! PyArg_ParseTuple(args, "|iO", &type, &str_is_ip)) + return NULL; + + if (str_is_ip != Py_None) { + host = ap_get_remote_host(self->request_rec->connection, + self->request_rec->per_dir_config, type, &_str_is_ip); + } + else { + host = ap_get_remote_host(self->request_rec->connection, + self->request_rec->per_dir_config, type, NULL); + } + + if (! 
host) { + Py_INCREF(Py_None); + return Py_None; + } + else { + if (str_is_ip != Py_None) { + return Py_BuildValue("(s,i)", host, _str_is_ip); + } + else { + return MpBytesOrUnicode_FromString(host); + } + } +} + +/** + ** request.get_options(request self) + ** + */ + +static PyObject * req_get_options(requestobject *self, PyObject *args) +{ + const apr_array_header_t* ah; + apr_table_entry_t* elts; + int i; + + py_config *conf = + (py_config *) ap_get_module_config(self->request_rec->per_dir_config, + &python_module); + if (!self->options) + self->options = MpTable_FromTable(conf->options); + + if (((tableobject*)self->options)->table != conf->options) + ((tableobject*)self->options)->table = conf->options; + + ah = apr_table_elts(conf->options); + elts = (apr_table_entry_t *) ah->elts; + + + /* Remove the empty values as a way to unset values. + * See https://issues.apache.org/jira/browse/MODPYTHON-6 */ + for(i=0;inelts;i++,elts++) { + if(strlen(elts->val)==0) { + apr_table_unset(conf->options,elts->key); + } + } + + Py_INCREF(self->options); + return self->options; +} + + +/** + ** request.internal_redirect(request self, string newuri) + ** + */ + +static PyObject * req_internal_redirect(requestobject *self, PyObject *args) +{ + char *new_uri; + + if (! PyArg_ParseTuple(args, "z", &new_uri)) + return NULL; /* error */ + + Py_BEGIN_ALLOW_THREADS + ap_internal_redirect(new_uri, self->request_rec); + Py_END_ALLOW_THREADS + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** request.log_error(req self, string message, int level) + ** + * calls ap_log_rerror + */ + +static PyObject * req_log_error(requestobject *self, PyObject *args) +{ + int level = 0; + char *message = NULL; + + if (! PyArg_ParseTuple(args, "z|i", &message, &level)) + return NULL; /* error */ + + if (message) { + + if (! 
level) + level = APLOG_ERR; + + Py_BEGIN_ALLOW_THREADS + ap_log_rerror(APLOG_MARK, level, 0, self->request_rec, "%s", message); + Py_END_ALLOW_THREADS + } + + Py_INCREF(Py_None); + return Py_None; +} + + /** + ** request.meets_conditions(req self) + ** + * ap_meets_conditions wrapper + */ + +static PyObject * req_meets_conditions(requestobject *self) { + return PyLong_FromLong(ap_meets_conditions(self->request_rec)); +} + + +/** + ** request.read(request self, int bytes) + ** + * Reads stuff like POST requests from the client + * (based on the old net_read) + */ + +static PyObject * req_read(requestobject *self, PyObject *args) +{ + int rc, bytes_read, chunk_len; + char *buffer; + PyObject *result; + int copied = 0; + long len = -1; + + if (! PyArg_ParseTuple(args, "|l", &len)) + return NULL; + + if (len == 0) { + return PyBytes_FromString(""); + } + + /* is this the first read? */ + if (! self->request_rec->read_length) { + + /* then do some initial setting up */ + rc = ap_setup_client_block(self->request_rec, REQUEST_CHUNKED_ERROR); + if(rc != OK) { + PyObject *val = PyLong_FromLong(rc); + if (val == NULL) + return NULL; + PyErr_SetObject(get_ServerReturn(), val); + Py_DECREF(val); + return NULL; + } + + if (! ap_should_client_block(self->request_rec)) { + /* client has nothing to send */ + return PyBytes_FromString(""); + } + } + + if (len < 0) + /* XXX ok to use request_rec->remaining? 
*/ + len = self->request_rec->remaining + + (self->rbuff_len - self->rbuff_pos); + + result = PyBytes_FromStringAndSize(NULL, len); + + /* possibly no more memory */ + if (result == NULL) + return NULL; + + buffer = PyBytes_AS_STRING((PyBytesObject *) result); + + /* if anything left in the readline buffer */ + while ((self->rbuff_pos < self->rbuff_len) && (copied < len)) + buffer[copied++] = self->rbuff[self->rbuff_pos++]; + + /* Free rbuff if we're done with it */ + if (self->rbuff_pos >= self->rbuff_len && self->rbuff != NULL) { + free(self->rbuff); + self->rbuff = NULL; + } + + if (copied == len) + return result; /* we're done! */ + + /* read it in */ + Py_BEGIN_ALLOW_THREADS + chunk_len = ap_get_client_block(self->request_rec, buffer, len); + Py_END_ALLOW_THREADS + bytes_read = chunk_len; + + /* if this is a "short read", try reading more */ + while ((bytes_read < len) && (chunk_len != 0)) { + Py_BEGIN_ALLOW_THREADS + chunk_len = ap_get_client_block(self->request_rec, + buffer+bytes_read, len-bytes_read); + Py_END_ALLOW_THREADS + if (chunk_len == -1) { + PyErr_SetString(PyExc_IOError, "Client read error (Timeout?)"); + return NULL; + } + else + bytes_read += chunk_len; + } + + /* resize if necessary */ + if (bytes_read < len) + if(_PyBytes_Resize(&result, bytes_read)) + return NULL; + + return result; +} + +/** + ** request.readline(request self, int maxbytes) + ** + * Reads stuff like POST requests from the client + * (based on the old net_read) until EOL + */ + +static PyObject * req_readline(requestobject *self, PyObject *args) +{ + + int rc, chunk_len, bytes_read; + char *buffer; + PyObject *result; + int copied = 0; + long len = -1; + + if (! PyArg_ParseTuple(args, "|l", &len)) + return NULL; + + if (len == 0) { + return PyBytes_FromString(""); + } + + /* is this the first read? */ + if (! 
self->request_rec->read_length) { + + /* then do some initial setting up */ + rc = ap_setup_client_block(self->request_rec, REQUEST_CHUNKED_ERROR); + + if (rc != OK) { + PyObject *val = PyLong_FromLong(rc); + if (val == NULL) + return NULL; + PyErr_SetObject(get_ServerReturn(), val); + Py_DECREF(val); + return NULL; + } + + if (! ap_should_client_block(self->request_rec)) { + /* client has nothing to send */ + return PyBytes_FromString(""); + } + } + + if (len < 0) + len = self->request_rec->remaining + + (self->rbuff_len - self->rbuff_pos); + + /* create the result buffer */ + result = PyBytes_FromStringAndSize(NULL, len); + + /* possibly no more memory */ + if (result == NULL) + return NULL; + + buffer = PyBytes_AS_STRING((PyBytesObject *) result); + + /* is there anything left in the rbuff from previous reads? */ + if (self->rbuff_pos < self->rbuff_len) { + + /* if yes, process that first */ + while (self->rbuff_pos < self->rbuff_len) { + + buffer[copied++] = self->rbuff[self->rbuff_pos]; + if ((self->rbuff[self->rbuff_pos++] == '\n') || + (copied == len)) { + + /* our work is done */ + + /* resize if necessary */ + if (copied < len) + if(_PyBytes_Resize(&result, copied)) + return NULL; + + /* fix for MODPYTHON-181 leak */ + if (self->rbuff_pos >= self->rbuff_len && self->rbuff != NULL) { + free(self->rbuff); + self->rbuff = NULL; + } + + return result; + } + } + } + + /* Free old rbuff as the old contents have been copied over and + we are about to allocate a new rbuff. Perhaps this could be reused + somehow? */ + if (self->rbuff_pos >= self->rbuff_len && self->rbuff != NULL) + { + free(self->rbuff); + self->rbuff = NULL; + } + + /* if got this far, the buffer should be empty, we need to read more */ + + /* create a read buffer + + The buffer len will be at least HUGE_STRING_LEN in size, + to avoid memory fragmention + */ + self->rbuff_len = len > HUGE_STRING_LEN ? len : HUGE_STRING_LEN; + self->rbuff_pos = 0; + self->rbuff = malloc(self->rbuff_len); + if (! 
self->rbuff) + return PyErr_NoMemory(); + + /* read it in */ + Py_BEGIN_ALLOW_THREADS + chunk_len = ap_get_client_block(self->request_rec, self->rbuff, + self->rbuff_len); + Py_END_ALLOW_THREADS; + + /* ap_get_client_block could return -1 on error */ + if (chunk_len == -1) { + + /* Free rbuff since returning NULL here should end the request */ + free(self->rbuff); + self->rbuff = NULL; + + PyErr_SetString(PyExc_IOError, "Client read error (Timeout?)"); + return NULL; + } + + bytes_read = chunk_len; + + /* if this is a "short read", try reading more */ + while ((chunk_len != 0 ) && (bytes_read + copied < len)) { + + Py_BEGIN_ALLOW_THREADS + chunk_len = ap_get_client_block(self->request_rec, + self->rbuff + bytes_read, + self->rbuff_len - bytes_read); + Py_END_ALLOW_THREADS + + if (chunk_len == -1) { + + /* Free rbuff since returning NULL here should end the request */ + free(self->rbuff); + self->rbuff = NULL; + + PyErr_SetString(PyExc_IOError, "Client read error (Timeout?)"); + return NULL; + } + else + bytes_read += chunk_len; + } + self->rbuff_len = bytes_read; + self->rbuff_pos = 0; + + /* now copy the remaining bytes */ + while (self->rbuff_pos < self->rbuff_len) { + + buffer[copied++] = self->rbuff[self->rbuff_pos]; + if ((self->rbuff[self->rbuff_pos++] == '\n') || + (copied == len)) + /* our work is done */ + break; + } + + /* Free rbuff if we're done with it */ + if (self->rbuff_pos >= self->rbuff_len && self->rbuff != NULL) + { + free(self->rbuff); + self->rbuff = NULL; + } + + /* resize if necessary */ + if (copied < len) + if(_PyBytes_Resize(&result, copied)) + return NULL; + + return result; +} + +/** + ** request.readlines([long maxsize]) + ** + * just like file.readlines() + */ + +static PyObject *req_readlines(requestobject *self, PyObject *args) +{ + + PyObject *result = PyList_New(0); + PyObject *line, *rlargs; + long sizehint = -1; + long size = 0; + long linesize; + + if (! 
PyArg_ParseTuple(args, "|l", &sizehint)) + return NULL; + + if (result == NULL) + return PyErr_NoMemory(); + + rlargs = PyTuple_New(0); + if (result == NULL) + return PyErr_NoMemory(); + + line = req_readline(self, rlargs); + while (line && ((linesize=PyBytes_Size(line))>0)) { + PyList_Append(result, line); + size += linesize; + if ((sizehint != -1) && (size >= sizehint)) + break; + Py_DECREF(line); + line = req_readline(self, args); + } + Py_XDECREF(line); + + if (!line) + return NULL; + + return result; +} + +/** + ** request.register_cleanup(handler, data) + ** + * registers a cleanup at request pool destruction time. + * optional data argument will be passed to the cleanup function. + */ + +static PyObject *req_register_cleanup(requestobject *self, PyObject *args) +{ + cleanup_info *ci; + PyObject *handler = NULL; + PyObject *data = NULL; + PyObject *name_obj = NULL; + char * c_name_obj; + char *name = NULL; + + if (! PyArg_ParseTuple(args, "O|O", &handler, &data)) + return NULL; /* bad args */ + + ci = (cleanup_info *)malloc(sizeof(cleanup_info)); + ci->request_rec = self->request_rec; + ci->server_rec = self->request_rec->server; + if (PyCallable_Check(handler)) { + Py_INCREF(handler); + ci->handler = handler; + name_obj = python_interpreter_name(); + MP_ANYSTR_AS_STR(c_name_obj, name_obj, 1); + if (!c_name_obj) { + Py_DECREF(name_obj); /* MP_ANYSTR_AS_STR */ + return NULL; + } + name = (char *)malloc(strlen(c_name_obj)+1); + if (!name) + return PyErr_NoMemory(); + strcpy(name, c_name_obj); + ci->interpreter = name; + Py_DECREF(name_obj); /* MP_ANYSTR_AS_STR */ + if (data) { + Py_INCREF(data); + ci->data = data; + } + else { + Py_INCREF(Py_None); + ci->data = Py_None; + } + } + else { + PyErr_SetString(PyExc_ValueError, + "first argument must be a callable object"); + free(ci); + return NULL; + } + + apr_pool_cleanup_register(self->request_rec->pool, ci, python_cleanup, + apr_pool_cleanup_null); + + Py_INCREF(Py_None); + return Py_None; + +} + + +#if 
!AP_MODULE_MAGIC_AT_LEAST(20060110,0) + +/** + ** request.requires(self) + ** + * Interface to ap_requires() + */ + +static PyObject * req_requires(requestobject *self) +{ + + /* This function returns everything specified after the "requires" + as is, without any attempts to parse/organize because + "requires" args only need to be grokable by mod_auth if it is + authoritative. When AuthAuthoritative is off, anything can + follow requires, e.g. "requires role terminator". + */ + + const apr_array_header_t *reqs_arr = ap_requires(self->request_rec); + require_line *reqs; + int i, ti = 0; + + PyObject *result; + + if (!reqs_arr) { + return Py_BuildValue("()"); + } + + result = PyTuple_New(reqs_arr->nelts); + + reqs = (require_line *) reqs_arr->elts; + + for (i = 0; i < reqs_arr->nelts; ++i) { + if (reqs[i].method_mask & (AP_METHOD_BIT << self->request_rec->method_number)) { + PyTuple_SetItem(result, ti++, + MpBytesOrUnicode_FromString(reqs[i].requirement)); + } + } + + _PyTuple_Resize(&result, ti); + + return result; +} + +#endif + +/** + ** request.send_http_header(request self) + ** + * this is a noop, just so we don't break old scripts + */ + +static PyObject * req_send_http_header(requestobject *self) +{ + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** request.set_content_length(request self, long content_length) + ** + * write output to the client + */ + +static PyObject * req_set_content_length(requestobject *self, PyObject *args) +{ + long len; + + if (! 
PyArg_ParseTuple(args, "l", &len)) + return NULL; /* bad args */ + + ap_set_content_length(self->request_rec, len); + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** request.set_etag(request self) + ** + * sets the outgoing ETag header + */ + +static PyObject * req_set_etag(requestobject *self, PyObject *args) +{ + ap_set_etag(self->request_rec); + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** request.set_last_modified(request self) + ** + * set the Last-modified header + */ + +static PyObject * req_set_last_modified(requestobject *self, PyObject *args) +{ + ap_set_last_modified(self->request_rec); + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** request.update_mtime(request self, long mtime) + ** + * updates mtime attribute if newer + */ + +static PyObject * req_update_mtime(requestobject *self, PyObject *args) +{ + double mtime; + + if (! PyArg_ParseTuple(args, "d", &mtime)) + return NULL; /* bad args */ + + ap_update_mtime(self->request_rec, apr_time_from_sec(mtime)); + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** request.write(request self, string what, flush=1) + ** + * write output to the client + */ + +static PyObject * req_write(requestobject *self, PyObject *args) +{ + int len; + int rc; + char *buff; + int flush=1; + + if (! 
PyArg_ParseTuple(args, "s#|i", &buff, &len, &flush)) + return NULL; /* bad args */ + + if (len > 0 ) { + + Py_BEGIN_ALLOW_THREADS + rc = ap_rwrite(buff, len, self->request_rec); + if (flush && (rc != -1)) + rc = ap_rflush(self->request_rec); + Py_END_ALLOW_THREADS + if (rc == -1) { + PyErr_SetString(PyExc_IOError, "Write failed, client closed connection."); + return NULL; + } + } + + self->bytes_queued += len; + + Py_INCREF(Py_None); + return Py_None; + +} + +/** + ** request.flush(request self) + ** + * flush output buffer + */ + +static PyObject * req_flush(requestobject *self) +{ + int rc; + + Py_BEGIN_ALLOW_THREADS + rc = ap_rflush(self->request_rec); + Py_END_ALLOW_THREADS + if (rc == -1) { + PyErr_SetString(PyExc_IOError, "Flush failed, client closed connection."); + return NULL; + } + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** request.sendfile + ** + */ + +static PyObject * req_sendfile(requestobject *self, PyObject *args) +{ + char *fname; + apr_file_t *fd; + apr_size_t offset=0, len=-1, nbytes; + apr_status_t status; + PyObject * py_result = NULL; + apr_finfo_t finfo; + + if (! 
PyArg_ParseTuple(args, "s|ll", &fname, &offset, &len)) + return NULL; /* bad args */ + + Py_BEGIN_ALLOW_THREADS + status=apr_stat(&finfo, fname, + APR_FINFO_SIZE, self->request_rec->pool); + Py_END_ALLOW_THREADS + if (status != APR_SUCCESS) { + PyErr_SetString(PyExc_IOError, "Could not stat file for reading"); + return NULL; + } + + Py_BEGIN_ALLOW_THREADS + status=apr_file_open(&fd, fname, + APR_READ, APR_OS_DEFAULT, + self->request_rec->pool); + Py_END_ALLOW_THREADS + if (status != APR_SUCCESS) { + PyErr_SetString(PyExc_IOError, "Could not open file for reading"); + return NULL; + } + + if (len==-1) len=finfo.size; + + Py_BEGIN_ALLOW_THREADS + status = ap_send_fd(fd, self->request_rec, offset, + len, &nbytes); + Py_END_ALLOW_THREADS + apr_file_close(fd); + + if (status != APR_SUCCESS) { + PyErr_SetString(PyExc_IOError, "Write failed, client closed connection."); + return NULL; + } + + self->bytes_queued += len; + + py_result = PyLong_FromLong (nbytes); + Py_INCREF(py_result); + return py_result; +} + +static PyMethodDef request_methods[] = { + {"write", (PyCFunction) req_write, METH_VARARGS}, + {"get_config", (PyCFunction) req_get_config, METH_NOARGS}, + {"build_wsgi_env", (PyCFunction) req_build_wsgi_env, METH_NOARGS}, + {"wsgi_start_response", (PyCFunction) req_wsgi_start_response, METH_VARARGS}, + {"add_cgi_vars", (PyCFunction) req_add_cgi_vars, METH_NOARGS}, + {"add_common_vars", (PyCFunction) req_add_common_vars, METH_NOARGS}, + {"add_handler", (PyCFunction) req_add_handler, METH_VARARGS}, + {"add_input_filter", (PyCFunction) req_add_input_filter, METH_VARARGS}, + {"add_output_filter", (PyCFunction) req_add_output_filter, METH_VARARGS}, + {"allow_methods", (PyCFunction) req_allow_methods, METH_VARARGS}, + {"auth_name", (PyCFunction) req_auth_name, METH_NOARGS}, + {"auth_type", (PyCFunction) req_auth_type, METH_NOARGS}, + {"construct_url", (PyCFunction) req_construct_url, METH_VARARGS}, + {"discard_request_body", (PyCFunction) req_discard_request_body, 
METH_NOARGS},
    {"document_root",          (PyCFunction) req_document_root,        METH_NOARGS},
    {"flush",                  (PyCFunction) req_flush,                METH_NOARGS},
    {"get_basic_auth_pw",      (PyCFunction) req_get_basic_auth_pw,    METH_NOARGS},
    {"get_addhandler_exts",    (PyCFunction) req_get_addhandler_exts,  METH_NOARGS},
    {"get_remote_host",        (PyCFunction) req_get_remote_host,      METH_VARARGS},
    {"get_options",            (PyCFunction) req_get_options,          METH_NOARGS},
    {"internal_redirect",      (PyCFunction) req_internal_redirect,    METH_VARARGS},
    {"is_https",               (PyCFunction) req_is_https,             METH_NOARGS},
    {"log_error",              (PyCFunction) req_log_error,            METH_VARARGS},
    {"meets_conditions",       (PyCFunction) req_meets_conditions,     METH_NOARGS},
    {"read",                   (PyCFunction) req_read,                 METH_VARARGS},
    {"readline",               (PyCFunction) req_readline,             METH_VARARGS},
    {"readlines",              (PyCFunction) req_readlines,            METH_VARARGS},
    {"register_cleanup",       (PyCFunction) req_register_cleanup,     METH_VARARGS},
    {"register_input_filter",  (PyCFunction) req_register_input_filter, METH_VARARGS},
    {"register_output_filter", (PyCFunction) req_register_output_filter, METH_VARARGS},
#if !AP_MODULE_MAGIC_AT_LEAST(20060110,0)
    /* only on older httpd: the require-directive API this wraps was
     * removed at this module magic number */
    {"requires",               (PyCFunction) req_requires,             METH_NOARGS},
#endif
    {"send_http_header",       (PyCFunction) req_send_http_header,     METH_NOARGS},
    {"sendfile",               (PyCFunction) req_sendfile,             METH_VARARGS},
    {"set_content_length",     (PyCFunction) req_set_content_length,   METH_VARARGS},
    {"set_etag",               (PyCFunction) req_set_etag,             METH_NOARGS},
    {"set_last_modified",      (PyCFunction) req_set_last_modified,    METH_NOARGS},
    {"ssl_var_lookup",         (PyCFunction) req_ssl_var_lookup,       METH_VARARGS},
    {"update_mtime",           (PyCFunction) req_update_mtime,         METH_VARARGS},
    { NULL, NULL } /* sentinel */
};


/*
  These are offsets into the Apache request_rec structure.
  They are accessed via getset functions. Note that the types
  specified here are irrelevant if a function other than
  getreq_recmbr() is used. E.g.
bytes_sent is a long long,
  and is retrieved via getreq_recmbr_off() which ignores what's
  here.
*/

/* Shorthand for offsets into request_rec. */
#define OFF(x) offsetof(request_rec, x)

static PyMemberDef request_rec_mbrs[] = {
    {"uri",                T_STRING,  OFF(uri)},
    {"status",             T_INT,     OFF(status)},
    {"the_request",        T_STRING,  OFF(the_request)},
    {"assbackwards",       T_INT,     OFF(assbackwards)},
    {"proxyreq",           T_INT,     OFF(proxyreq)},
    {"header_only",        T_INT,     OFF(header_only)},
    {"protocol",           T_STRING,  OFF(protocol)},
    {"proto_num",          T_INT,     OFF(proto_num)},
    {"hostname",           T_STRING,  OFF(hostname)},
    {"request_time",       T_LONG,    OFF(request_time)},
    {"status_line",        T_STRING,  OFF(status_line)},
    {"method",             T_STRING,  OFF(method)},
    {"method_number",      T_INT,     OFF(method_number)},
    {"allowed",            T_LONG,    OFF(allowed)},
    {"allowed_xmethods",   T_OBJECT,  OFF(allowed_xmethods)},
    {"allowed_methods",    T_OBJECT,  OFF(allowed_methods)},
    {"sent_bodyct",        T_LONG,    OFF(sent_bodyct)},
    {"bytes_sent",         T_LONG,    OFF(bytes_sent)},
    {"mtime",              T_LONG,    OFF(mtime)},
    {"chunked",            T_INT,     OFF(chunked)},
    {"range",              T_STRING,  OFF(range)},
    {"clength",            T_LONG,    OFF(clength)},
    {"remaining",          T_LONG,    OFF(remaining)},
    {"read_length",        T_LONG,    OFF(read_length)},
    {"read_body",          T_INT,     OFF(read_body)},
    {"read_chunked",       T_INT,     OFF(read_chunked)},
    {"expecting_100",      T_INT,     OFF(expecting_100)},
    {"content_type",       T_STRING,  OFF(content_type)},
    {"handler",            T_STRING,  OFF(handler)},
    {"content_encoding",   T_STRING,  OFF(content_encoding)},
    {"content_languages",  T_OBJECT,  OFF(content_languages)},
    {"vlist_validator",    T_STRING,  OFF(vlist_validator)},
    {"user",               T_STRING,  OFF(user)},
    {"ap_auth_type",       T_STRING,  OFF(ap_auth_type)},
    {"no_cache",           T_INT,     OFF(no_cache)},
    {"no_local_copy",      T_INT,     OFF(no_local_copy)},
    {"unparsed_uri",       T_STRING,  OFF(unparsed_uri)},
    {"filename",           T_STRING,  OFF(filename)},
    {"canonical_filename", T_STRING,  OFF(canonical_filename)},
    {"path_info",          T_STRING,  OFF(path_info)},
    {"args",               T_STRING,  OFF(args)},
    {"finfo",
T_OBJECT,  OFF(finfo)},
    {"parsed_uri",         T_OBJECT,  OFF(parsed_uri)},
    {"used_path_info",     T_INT,     OFF(used_path_info)},
    {"eos_sent",           T_INT,     OFF(eos_sent)},
#if AP_MODULE_MAGIC_AT_LEAST(20111130,0)
    /* only present on httpd >= 2.4 */
    {"useragent_addr",     T_OBJECT,  OFF(useragent_addr)},
    {"useragent_ip",       T_STRING,  OFF(useragent_ip)},
#endif
    {NULL}  /* Sentinel */
};

/**
 ** getreq_recmbr
 **
 *    Retrieves request_rec structure members
 */

static PyObject *getreq_recmbr(requestobject *self, void *name)
{
    /*
     * apparently at least ap_internal_fast_redirect blatantly
     * substitutes request members, and so we always have to make
     * sure that the various apr_tables referenced haven't been
     * replaced in between handlers, leaving us with a stale pointer.
     */

    if (strcmp(name, "interpreter") == 0) {
        return python_interpreter_name();
    }
    else if (strcmp(name, "headers_in") == 0) {
        /* create the wrapper lazily; re-point it if httpd swapped the table */
        if (!self->headers_in)
            self->headers_in = MpTable_FromTable(self->request_rec->headers_in);
        else if (((tableobject*)self->headers_in)->table != self->request_rec->headers_in)
            ((tableobject*)self->headers_in)->table = self->request_rec->headers_in;
        Py_INCREF(self->headers_in);
        return self->headers_in;
    }
    else if (strcmp(name, "headers_out") == 0) {
        if (!self->headers_out)
            self->headers_out = MpTable_FromTable(self->request_rec->headers_out);
        else if (((tableobject*)self->headers_out)->table != self->request_rec->headers_out)
            ((tableobject*)self->headers_out)->table = self->request_rec->headers_out;
        Py_INCREF(self->headers_out);
        return self->headers_out;
    }
    else if (strcmp(name, "err_headers_out") == 0) {
        if (!self->err_headers_out)
            self->err_headers_out = MpTable_FromTable(self->request_rec->err_headers_out);
        else if (((tableobject*)self->err_headers_out)->table != self->request_rec->err_headers_out)
            ((tableobject*)self->err_headers_out)->table = self->request_rec->err_headers_out;
        Py_INCREF(self->err_headers_out);
        return self->err_headers_out;
    }
    else if (strcmp(name,
"subprocess_env") == 0) {
        if (!self->subprocess_env)
            self->subprocess_env = MpTable_FromTable(self->request_rec->subprocess_env);
        else if (((tableobject*)self->subprocess_env)->table != self->request_rec->subprocess_env)
            ((tableobject*)self->subprocess_env)->table = self->request_rec->subprocess_env;
        Py_INCREF(self->subprocess_env);
        return self->subprocess_env;
    }
    else if (strcmp(name, "notes") == 0) {
        if (!self->notes)
            self->notes = MpTable_FromTable(self->request_rec->notes);
        else if (((tableobject*)self->notes)->table != self->request_rec->notes)
            ((tableobject*)self->notes)->table = self->request_rec->notes;
        Py_INCREF(self->notes);
        return self->notes;
    }
    else if (strcmp(name, "_bytes_queued") == 0) {
        /* bytes_queued is apr_off_t; pick the Python long ctor that fits */
        if (sizeof(apr_off_t) == sizeof(PY_LONG_LONG)) {
            PY_LONG_LONG l = self->bytes_queued;
            return PyLong_FromLongLong(l);
        }
        else {
            /* assume it's long */
            long l = self->bytes_queued;
            return PyLong_FromLong(l);
        }
    }
    else if (strcmp(name, "user") == 0) {
        /* Use Latin1, see req_get_basic_auth_pw() comment */
        if (self->request_rec->user) {
#if PY_MAJOR_VERSION < 3
            return PyBytes_FromString(self->request_rec->user);
#else
            return PyUnicode_DecodeLatin1(self->request_rec->user, strlen(self->request_rec->user), NULL);
#endif
        } else {
            Py_INCREF(Py_None);
            return Py_None;
        }
    }
    else if (strcmp(name, "_request_rec") == 0) {
        /* expose the raw pointer; PyCObject predates 2.7's capsules */
#if PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION < 7
        return PyCObject_FromVoidPtr(self->request_rec, 0);
#else
        return PyCapsule_New((void *)self->request_rec, NULL, NULL);
#endif
    }
    else {
        /* fall through to generic struct-member access */
        PyMemberDef *md = find_memberdef(request_rec_mbrs, name);
        if (!md) {
            PyErr_SetString(PyExc_AttributeError, name);
            return NULL;
        }
        return PyMember_GetOne((char*)self->request_rec, md);
    }
}

/**
 ** setreq_recmbr
 **
 *    Sets request_rec structure members
 */

static int setreq_recmbr(requestobject *self, PyObject *val, void *name)
{
    char *v;
    PyMemberDef *md;
    if
(strcmp(name, "content_type") == 0) {
        /* Each string member follows the same pattern: coerce the Python
         * value to a C string (MP_ANYSTR_AS_STR takes a reference that must
         * be released), copy it into the request pool, then drop the ref. */
        MP_ANYSTR_AS_STR(v, val, 1);
        if (!v) {
            Py_DECREF(val); /* MP_ANYSTR_AS_STR */
            return -1;
        }
        ap_set_content_type(self->request_rec,
                            apr_pstrdup(self->request_rec->pool, v));
        self->content_type_set = 1;   /* remember it was set explicitly */
        Py_DECREF(val); /* MP_ANYSTR_AS_STR */
        return 0;
    }
    else if (strcmp(name, "user") == 0) {
        MP_ANYSTR_AS_STR(v, val, 1);
        if (!v) {
            Py_DECREF(val); /* MP_ANYSTR_AS_STR */
            return -1;
        }
        self->request_rec->user =
            apr_pstrdup(self->request_rec->pool, v);
        Py_DECREF(val); /* MP_ANYSTR_AS_STR */
        return 0;
    }
    else if (strcmp(name, "ap_auth_type") == 0) {
        MP_ANYSTR_AS_STR(v, val, 1);
        if (!v) {
            Py_DECREF(val); /* MP_ANYSTR_AS_STR */
            return -1;
        }
        self->request_rec->ap_auth_type =
            apr_pstrdup(self->request_rec->pool, v);
        Py_DECREF(val); /* MP_ANYSTR_AS_STR */
        return 0;
    }
    else if (strcmp(name, "filename") == 0) {
        MP_ANYSTR_AS_STR(v, val, 1);
        if (!v) {
            Py_DECREF(val); /* MP_ANYSTR_AS_STR */
            return -1;
        }
        self->request_rec->filename =
            apr_pstrdup(self->request_rec->pool, v);
        Py_DECREF(val); /* MP_ANYSTR_AS_STR */
        return 0;
    }
    else if (strcmp(name, "canonical_filename") == 0) {
        MP_ANYSTR_AS_STR(v, val, 1);
        if (!v) {
            Py_DECREF(val); /* MP_ANYSTR_AS_STR */
            return -1;
        }
        self->request_rec->canonical_filename =
            apr_pstrdup(self->request_rec->pool, v);
        Py_DECREF(val); /* MP_ANYSTR_AS_STR */
        return 0;
    }
    else if (strcmp(name, "path_info") == 0) {
        MP_ANYSTR_AS_STR(v, val, 1);
        if (!v) {
            Py_DECREF(val); /* MP_ANYSTR_AS_STR */
            return -1;
        }
        self->request_rec->path_info =
            apr_pstrdup(self->request_rec->pool, v);
        Py_DECREF(val); /* MP_ANYSTR_AS_STR */
        return 0;
    }
    else if (strcmp(name, "args") == 0) {
        MP_ANYSTR_AS_STR(v, val, 1);
        if (!v) {
            Py_DECREF(val); /* MP_ANYSTR_AS_STR */
            return -1;
        }
        self->request_rec->args =
            apr_pstrdup(self->request_rec->pool, v);
        Py_DECREF(val); /* MP_ANYSTR_AS_STR */
        return 0;
    }
    else if
(strcmp(name, "handler") == 0) {
        /* handler is the only string member that accepts None (clears it) */
        if (val == Py_None) {
            self->request_rec->handler = 0;
            return 0;
        }
        MP_ANYSTR_AS_STR(v, val, 1);
        if (!v) {
            Py_DECREF(val); /* MP_ANYSTR_AS_STR */
            return -1;
        }
        self->request_rec->handler =
            apr_pstrdup(self->request_rec->pool, v);
        Py_DECREF(val); /* MP_ANYSTR_AS_STR */
        return 0;
    }
    else if (strcmp(name, "uri") == 0) {
        MP_ANYSTR_AS_STR(v, val, 1);
        if (!v) {
            Py_DECREF(val); /* MP_ANYSTR_AS_STR */
            return -1;
        }
        self->request_rec->uri =
            apr_pstrdup(self->request_rec->pool, v);
        Py_DECREF(val); /* MP_ANYSTR_AS_STR */
        return 0;
    }
    else if (strcmp(name, "finfo") == 0) {
        finfoobject *f;
        if (! MpFinfo_Check(val)) {
            PyErr_SetString(PyExc_TypeError, "finfo must be a finfoobject");
            return -1;
        }
        f = (finfoobject *)val;
        /* struct copy, then re-duplicate the embedded strings into the
         * request pool so their lifetime matches the request */
        self->request_rec->finfo = *f->finfo;
        self->request_rec->finfo.fname = apr_pstrdup(self->request_rec->pool,
                                                     f->finfo->fname);
        self->request_rec->finfo.name = apr_pstrdup(self->request_rec->pool,
                                                    f->finfo->name);
        return 0;
    }
    else if (strcmp(name, "chunked") == 0) {
        if (!
PyLong_Check(val)) {
            PyErr_SetString(PyExc_TypeError, "chunked must be a integer");
            return -1;
        }
        self->request_rec->chunked = PyLong_AsLong(val);
        return 0;
    }
    else if (strcmp(name, "status_line") == 0) {
        MP_ANYSTR_AS_STR(v, val, 1);
        if (!v) {
            Py_DECREF(val); /* MP_ANYSTR_AS_STR */
            return -1;
        }
        self->request_rec->status_line =
            apr_pstrdup(self->request_rec->pool, v);
        Py_DECREF(val); /* MP_ANYSTR_AS_STR */
        return 0;
    }

    /* anything else: generic struct-member assignment */
    md = find_memberdef(request_rec_mbrs, name);
    if (!md) {
        PyErr_SetString(PyExc_AttributeError, name);
        return -1;
    }
    return PyMember_SetOne((char*)self->request_rec, md, val);
}

/**
 ** getreq_recmbr_sockaddr
 **
 *    Retrieves apr_sockaddr_t request_rec members
 */

static PyObject *getreq_recmbr_sockaddr(requestobject *self, void *name)
{
    PyMemberDef *md = find_memberdef(request_rec_mbrs, name);
    apr_sockaddr_t *addr = *(apr_sockaddr_t **)((char *)self->request_rec + md->offset);
    return makesockaddr(addr);
}

/**
 ** getreq_recmbr_time
 **
 *    Retrieves apr_time_t request_rec members
 */

static PyObject *getreq_recmbr_time(requestobject *self, void *name)
{
    PyMemberDef *md = find_memberdef(request_rec_mbrs, name);
    char *addr = (char *)self->request_rec + md->offset;
    apr_time_t time = *(apr_time_t*)addr;
    /* apr_time_t is in microseconds; expose as float seconds */
    return PyFloat_FromDouble(time*0.000001);
}

/**
 ** getreq_recmbr_off
 **
 *    Retrieves apr_off_t request_rec members
 */

static PyObject *getreq_recmbr_off(requestobject *self, void *name)
{
    PyMemberDef *md = find_memberdef(request_rec_mbrs, name);
    char *addr = (char *)self->request_rec + md->offset;
    if (sizeof(apr_off_t) == sizeof(PY_LONG_LONG)) {
        PY_LONG_LONG l = *(PY_LONG_LONG*)addr;
        return PyLong_FromLongLong(l);
    }
    else {
        /* assume it's long */
        long l = *(long*)addr;
        return PyLong_FromLong(l);
    }
}

/**
 ** getreq_rec_ah
 **
 *    For array headers that will get converted to tuple
 */

static PyObject *getreq_rec_ah(requestobject
*self, void *name)
{
    const PyMemberDef *md = find_memberdef(request_rec_mbrs, name);
    apr_array_header_t *ah =
        *(apr_array_header_t **)((char *)self->request_rec + md->offset);

    return tuple_from_array_header(ah);
}

/**
 ** getreq_rec_ml
 **
 *    For method lists that will get converted to tuple
 */

static PyObject *getreq_rec_ml(requestobject *self, void *name)
{
    const PyMemberDef *md = find_memberdef(request_rec_mbrs, (char*)name);
    ap_method_list_t *ml =
        *(ap_method_list_t **)((char *)self->request_rec + md->offset);

    return tuple_from_method_list(ml);
}

/**
 ** getreq_rec_fi
 **
 *    For file info that will get converted to tuple
 */

static PyObject *getreq_rec_fi(requestobject *self, void *name)
{
    const PyMemberDef *md = find_memberdef(request_rec_mbrs, (char*)name);
    /* note: finfo is embedded in request_rec, not a pointer member */
    apr_finfo_t *fi =
        (apr_finfo_t *)((char *)self->request_rec + md->offset);

    return MpFinfo_FromFinfo(fi);
}

/**
 ** getreq_rec_uri
 **
 *    For parsed uri that will get converted to tuple
 */

static PyObject *getreq_rec_uri(requestobject *self, void *name)
{
    const PyMemberDef *md = find_memberdef(request_rec_mbrs, (char*)name);
    apr_uri_t *uri = (apr_uri_t *)((char *)self->request_rec + md->offset);

    return tuple_from_apr_uri(uri);
}

/**
 ** getmakeobj
 **
 *    A getter func that creates an object as needed.
 */

static PyObject *getmakeobj(requestobject* self, void *objname)
{
    char *name = (char *)objname;
    PyObject *result = NULL;

    /* connection and server wrappers are cached on the requestobject;
     * next/prev/main request wrappers are looked up each time */
    if (strcmp(name, "connection") == 0) {
        if (!self->connection && self->request_rec->connection) {
            self->connection = MpConn_FromConn(self->request_rec->connection);
        }
        result = self->connection;
    }
    else if (strcmp(name, "server") == 0) {
        if (!self->server && self->request_rec->server) {
            self->server = MpServer_FromServer(self->request_rec->server);
        }
        result = self->server;
    }
    else if (strcmp(name, "next") == 0) {
        if (self->request_rec->next) {
            result = (PyObject*)python_get_request_object(
                self->request_rec->next, 0);
        }
    }
    else if (strcmp(name, "prev") == 0) {
        if (self->request_rec->prev) {
            result = (PyObject*)python_get_request_object(
                self->request_rec->prev, 0);
        }
    }
    else if (strcmp(name, "main") == 0) {
        if (self->request_rec->main) {
            result = (PyObject*)python_get_request_object(
                self->request_rec->main, 0);
        }
    }

    /* fall back to None when the underlying pointer is absent */
    if (!result)
        result = Py_None;

    Py_INCREF(result);
    return result;

}

static PyGetSetDef request_getsets[] = {
    {"handler", (getter)getreq_recmbr, (setter)setreq_recmbr, "The handler string", "handler"},
    {"uri", (getter)getreq_recmbr, (setter)setreq_recmbr, "The path portion of URI", "uri"},
    {"interpreter", (getter)getreq_recmbr, NULL, "Python interpreter name", "interpreter"},
    {"connection", (getter)getmakeobj, NULL, "Connection object", "connection"},
    {"server", (getter)getmakeobj, NULL, "Server object", "server"},
    {"next", (getter)getmakeobj, NULL, "If redirected, pointer to the to request", "next"},
    {"prev", (getter)getmakeobj, NULL, "If redirected, pointer to the from request", "prev"},
    {"main", (getter)getmakeobj, NULL, "If subrequest, pointer to the main request", "main"},
    {"the_request", (getter)getreq_recmbr, NULL, "First line of request", "the_request"},
    {"assbackwards", (getter)getreq_recmbr, (setter)setreq_recmbr,
"HTTP/0.9 \"simple\" request", "assbackwards"},
    {"proxyreq", (getter)getreq_recmbr, (setter)setreq_recmbr, "A proxy request: one of apache.PROXYREQ_* values", "proxyreq"},
    /* typo fixes below in user-visible attribute docstrings:
     * "oppsed" -> "opposed", "contenct" -> "content" */
    {"header_only", (getter)getreq_recmbr, NULL, "HEAD request, as opposed to GET", "header_only"},
    {"protocol", (getter)getreq_recmbr, NULL, "Protocol as given to us, or HTTP/0.9", "protocol"},
    {"proto_num", (getter)getreq_recmbr, NULL, "Protocol version. 1.1 = 1001", "proto_num"},
    {"hostname", (getter)getreq_recmbr, NULL, "Host, as set by full URI or Host:", "hostname"},
    {"request_time", (getter)getreq_recmbr_time, NULL, "When request started", "request_time"},
    {"status_line", (getter)getreq_recmbr, (setter)setreq_recmbr, "Status line, if set by script", "status_line"},
    {"status", (getter)getreq_recmbr, (setter)setreq_recmbr, "Status", "status"},
    {"method", (getter)getreq_recmbr, NULL, "Request method", "method"},
    {"method_number", (getter)getreq_recmbr, NULL, "Request method number, one of apache.M_*", "method_number"},
    {"allowed", (getter)getreq_recmbr, NULL, "Status", "allowed"},
    {"allowed_xmethods", (getter)getreq_rec_ah, NULL, "Allowed extension methods", "allowed_xmethods"},
    {"allowed_methods", (getter)getreq_rec_ml, NULL, "Allowed methods", "allowed_methods"},
    {"sent_bodyct", (getter)getreq_recmbr_off, NULL, "Byte count in stream for body", "sent_bodyct"},
    {"bytes_sent", (getter)getreq_recmbr_off, NULL, "Bytes sent", "bytes_sent"},
    {"mtime", (getter)getreq_recmbr_time, NULL, "Time resource was last modified", "mtime"},
    {"chunked", (getter)getreq_recmbr, (setter)setreq_recmbr, "Sending chunked transfer-coding", "chunked"},
    {"range", (getter)getreq_recmbr, NULL, "The Range: header", "range"},
    {"clength", (getter)getreq_recmbr_off, NULL, "The \"real\" content length", "clength"},
    {"remaining", (getter)getreq_recmbr_off, NULL, "Bytes left to read", "remaining"},
    {"read_length", (getter)getreq_recmbr_off, NULL, "Bytes that have been read", "read_length"},
+ {"read_body", (getter)getreq_recmbr, NULL, "How the request body should be read", "read_body"}, + {"read_chunked", (getter)getreq_recmbr, NULL, "Reading chunked transfer-coding", "read_chunked"}, + {"expecting_100", (getter)getreq_recmbr, NULL, "Is client waitin for a 100 response?", "expecting_100"}, + {"content_type", (getter)getreq_recmbr, (setter)setreq_recmbr, "Content type", "content_type"}, + {"content_encoding", (getter)getreq_recmbr, NULL, "How to encode the data", "content_encoding"}, + {"content_languages", (getter)getreq_rec_ah, NULL, "Content languages", "content_languages"}, + {"vlist_validator", (getter)getreq_recmbr, NULL, "Variant list validator (if negotiated)", "vlist_validator"}, + {"user", (getter)getreq_recmbr, (setter)setreq_recmbr, "If authentication check was made, the user name", "user"}, + {"ap_auth_type", (getter)getreq_recmbr, (setter)setreq_recmbr, "If authentication check was made, auth type", "ap_auth_type"}, + {"no_cache", (getter)getreq_recmbr, (setter)setreq_recmbr, "This response in non-cacheable", "no_cache"}, + {"no_local_copy", (getter)getreq_recmbr, (setter)setreq_recmbr, "There is no local copy of the response", "no_local_copy"}, + {"unparsed_uri", (getter)getreq_recmbr, NULL, "The URI without any parsing performed", "unparsed_uri"}, + {"filename", (getter)getreq_recmbr, (setter)setreq_recmbr, "The file name on disk that this request corresponds to", "filename"}, + {"canonical_filename", (getter)getreq_recmbr, (setter)setreq_recmbr, "The true filename (req.filename is canonicalized if they dont match)", "canonical_filename"}, + {"path_info", (getter)getreq_recmbr, (setter)setreq_recmbr, "Path_info, if any", "path_info"}, + {"args", (getter)getreq_recmbr, (setter)setreq_recmbr, "QUERY_ARGS, if any", "args"}, + {"finfo", (getter)getreq_rec_fi, (setter)setreq_recmbr, "File information", "finfo"}, + {"parsed_uri", (getter)getreq_rec_uri, NULL, "Components of URI", "parsed_uri"}, + {"used_path_info", (getter)getreq_recmbr, 
(setter)setreq_recmbr, "Flag to accept or reject path_info on current request", "used_path_info"}, + {"headers_in", (getter)getreq_recmbr, NULL, "Incoming headers", "headers_in"}, + {"headers_out", (getter)getreq_recmbr, NULL, "Outgoing headers", "headers_out"}, + {"err_headers_out", (getter)getreq_recmbr, NULL, "Outgoing headers for errors", "err_headers_out"}, + {"subprocess_env", (getter)getreq_recmbr, NULL, "Subprocess environment", "subprocess_env"}, + {"notes", (getter)getreq_recmbr, NULL, "Notes", "notes"}, + /* XXX per_dir_config */ + /* XXX request_config */ + /* XXX htaccess */ + /* XXX filters and eos */ + {"eos_sent", (getter)getreq_recmbr, NULL, "EOS bucket sent", "eos_sent"}, + {"_bytes_queued", (getter)getreq_recmbr, NULL, "Bytes queued by handler", "_bytes_queued"}, + {"_request_rec", (getter)getreq_recmbr, NULL, "Actual request_rec struct", "_request_rec"}, +#if AP_MODULE_MAGIC_AT_LEAST(20111130,0) + /* XXX useragent_* should be writable */ + {"useragent_addr", (getter)getreq_recmbr_sockaddr, NULL, "User agent address (could be overriden by a module)", "useragent_addr"}, + {"useragent_ip", (getter)getreq_recmbr, NULL, "User agent ip (could be overriden by a module)", "useragent_ip"}, +#endif + {NULL} /* Sentinel */ +}; + +#undef OFF +#define OFF(x) offsetof(requestobject, x) + +static PyMemberDef request_members[] = { + {"_content_type_set", T_INT, OFF(content_type_set), READONLY}, + {"phase", T_OBJECT, OFF(phase), READONLY}, + {"extension", T_STRING, OFF(extension), READONLY}, + {"hlist", T_OBJECT, OFF(hlo), READONLY}, + {NULL} /* Sentinel */ +}; + +/** + ** request_tp_clear + ** + * + */ + +#ifndef CLEAR_REQUEST_MEMBER +#define CLEAR_REQUEST_MEMBER(member)\ + tmp = (PyObject *) member;\ + member = NULL;\ + Py_XDECREF(tmp) +#endif + +static int request_tp_clear(requestobject *self) +{ + PyObject* tmp; + + CLEAR_REQUEST_MEMBER(self->dict); + CLEAR_REQUEST_MEMBER(self->connection); + CLEAR_REQUEST_MEMBER(self->server); + 
CLEAR_REQUEST_MEMBER(self->headers_in); + CLEAR_REQUEST_MEMBER(self->headers_out); + CLEAR_REQUEST_MEMBER(self->err_headers_out); + CLEAR_REQUEST_MEMBER(self->subprocess_env); + CLEAR_REQUEST_MEMBER(self->notes); + CLEAR_REQUEST_MEMBER(self->phase); + CLEAR_REQUEST_MEMBER(self->config); + CLEAR_REQUEST_MEMBER(self->options); + CLEAR_REQUEST_MEMBER(self->hlo); + + return 0; +} + + +/** + ** request_dealloc + ** + * + */ + +static void request_tp_dealloc(requestobject *self) +{ + /* de-register the object from the GC + * before its deallocation, to prevent the + * GC to run on a partially de-allocated object + */ + PyObject_GC_UnTrack(self); + + /* self->rebuff is used by req_readline. + * It may not have been freed if req_readline was not + * enough times to consume rbuff's contents. + */ + if (self->rbuff != NULL) + free(self->rbuff); + + request_tp_clear(self); + + PyObject_GC_Del(self); +} + +/** + ** request_tp_traverse + ** + * Traversal of the request object + */ +#ifndef VISIT_REQUEST_MEMBER +#define VISIT_REQUEST_MEMBER(member, visit, arg)\ + if (member) {\ + result = visit(member, arg);\ + if (result)\ + return result;\ + } +#endif + +static int request_tp_traverse(requestobject* self, visitproc visit, void *arg) { + int result; + VISIT_REQUEST_MEMBER(self->dict, visit, arg); + VISIT_REQUEST_MEMBER(self->connection, visit, arg); + VISIT_REQUEST_MEMBER(self->server, visit, arg); + VISIT_REQUEST_MEMBER(self->headers_in, visit, arg); + VISIT_REQUEST_MEMBER(self->headers_out, visit, arg); + VISIT_REQUEST_MEMBER(self->err_headers_out, visit, arg); + VISIT_REQUEST_MEMBER(self->subprocess_env, visit, arg); + VISIT_REQUEST_MEMBER(self->notes, visit, arg); + VISIT_REQUEST_MEMBER(self->phase, visit, arg); + + /* no need to Py_DECREF(dict) since the reference is borrowed */ + return 0; +} +static char request_doc[] = +"Apache request_rec structure\n"; + +PyTypeObject MpRequest_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "mp_request", /* tp_name */ + 
    sizeof(requestobject),               /* tp_basicsize */
    0,                                   /* tp_itemsize */
    (destructor)request_tp_dealloc,      /* tp_dealloc*/
    0,                                   /* tp_print*/
    0,                                   /* tp_getattr*/
    0,                                   /* tp_setattr*/
    0,                                   /* tp_compare*/
    0,                                   /* tp_repr*/
    0,                                   /* tp_as_number*/
    0,                                   /* tp_as_sequence*/
    0,                                   /* tp_as_mapping*/
    0,                                   /* tp_hash*/
    0,                                   /* tp_call */
    0,                                   /* tp_str */
    PyObject_GenericGetAttr,             /* tp_getattro */
    PyObject_GenericSetAttr,             /* tp_setattro */
    0,                                   /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT |
    Py_TPFLAGS_BASETYPE|
    Py_TPFLAGS_HAVE_GC ,                 /* tp_flags (subclassable, GC-tracked) */
    request_doc,                         /* tp_doc */
    (traverseproc)request_tp_traverse,   /* tp_traverse */
    /* PYTHON 2.5: 'inquiry' should be perhaps replaced with 'lenfunc' */
    (inquiry)request_tp_clear,           /* tp_clear */
    0,                                   /* tp_richcompare */
    0,                                   /* tp_weaklistoffset */
    0,                                   /* tp_iter */
    0,                                   /* tp_iternext */
    request_methods,                     /* tp_methods */
    request_members,                     /* tp_members */
    request_getsets,                     /* tp_getset */
    0,                                   /* tp_base */
    0,                                   /* tp_dict */
    0,                                   /* tp_descr_get */
    0,                                   /* tp_descr_set */
    offsetof(requestobject, dict),       /* tp_dictoffset (per-instance __dict__) */
    0,                                   /* tp_init */
    0,                                   /* tp_alloc */
    0,                                   /* tp_new */
    0,                                   /* tp_free */
};





diff --git a/src/serverobject.c b/src/serverobject.c
new file mode 100644
index 0000000..8e95303
--- /dev/null
+++ b/src/serverobject.c
@@ -0,0 +1,423 @@
/*
 * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy
 * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License.  You
 * may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. + * + * + * serverobject.c + * + * + */ + +#include "mod_python.h" + + +/** + ** MpServer_FromServer + ** + * This routine creates a Python serverobject given an Apache + * server_rec pointer. + * + */ + +PyObject * MpServer_FromServer(server_rec *s) +{ + serverobject *result; + MpServer_Type.ob_type = &PyType_Type; + result = PyObject_New(serverobject, &MpServer_Type); + if (! result) + return PyErr_NoMemory(); + + result->dict = PyDict_New(); + if (!result->dict) + return PyErr_NoMemory(); + + result->server = s; + result->next = NULL; + + return (PyObject *)result; +} + +/** + ** server.get_config(server self) + ** + * Returns the config directives set through Python* apache directives. + * unlike req.get_config, this one returns the per-server config + */ + +static PyObject * server_get_config(serverobject *self) +{ + py_config *conf = + (py_config *) ap_get_module_config(self->server->module_config, + &python_module); + return MpTable_FromTable(conf->directives); +} + +/** + ** server.get_options(server self) + ** + * Returns the options set through PythonOption directives. + * unlike req.get_options, this one returns the per-server config + */ + +static PyObject * server_get_options(serverobject *self) +{ + py_config *conf = + (py_config *) ap_get_module_config(self->server->module_config, + &python_module); + return MpTable_FromTable(conf->options); +} + +/** + ** server.log_error(server self, string message, int level) + ** + * calls ap_log_error + */ + +static PyObject * server_log_error(serverobject *self, PyObject *args) +{ + int level = 0; + char *message = NULL; + + if (! PyArg_ParseTuple(args, "z|i", &message, &level)) + return NULL; /* error */ + + if (message) { + + if (! 
level) + level = APLOG_ERR; + + ap_log_error(APLOG_MARK, level, 0, self->server, "%s", message); + } + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** server.register_cleanup(req, handler, data) + ** + * same as request.register_cleanup, except the server pool is used. + * the server pool gets destroyed before the child dies or when the + * whole process dies in multithreaded situations. + */ + +static PyObject *server_register_cleanup(serverobject *self, PyObject *args) +{ + + cleanup_info *ci; + PyObject *handler = NULL; + PyObject *data = NULL; + requestobject *req = NULL; + PyObject *name_obj = NULL; + char *name = NULL; + char *c_name_obj; + + if (! PyArg_ParseTuple(args, "OO|O", &req, &handler, &data)) + return NULL; + + if (! MpRequest_Check(req)) { + PyErr_SetString(PyExc_ValueError, + "first argument must be a request object"); + return NULL; + } + else if(!PyCallable_Check(handler)) { + PyErr_SetString(PyExc_ValueError, + "second argument must be a callable object"); + return NULL; + } + + ci = (cleanup_info *)malloc(sizeof(cleanup_info)); + ci->request_rec = NULL; + ci->server_rec = self->server; + Py_INCREF(handler); + ci->handler = handler; + name_obj = python_interpreter_name(); + MP_ANYSTR_AS_STR(c_name_obj, name_obj, 1); + if (!c_name_obj) { + Py_DECREF(name_obj); /* MP_ANYSTR_AS_STR */ + return NULL; + } + name = (char *)malloc(strlen(c_name_obj)+1); + if (!name) + return PyErr_NoMemory(); + strcpy(name, c_name_obj); + ci->interpreter = name; + Py_DECREF(name_obj); /* MP_ANYSTR_AS_STR */ + if (data) { + Py_INCREF(data); + ci->data = data; + } + else { + Py_INCREF(Py_None); + ci->data = Py_None; + } + + apr_pool_cleanup_register(child_init_pool, ci, python_cleanup, + apr_pool_cleanup_null); + + Py_INCREF(Py_None); + return Py_None; +} + +static PyMethodDef server_methods[] = { + {"get_config", (PyCFunction) server_get_config, METH_NOARGS}, + {"get_options", (PyCFunction) server_get_options, METH_NOARGS}, + {"log_error", (PyCFunction) 
server_log_error, METH_VARARGS}, + {"register_cleanup", (PyCFunction) server_register_cleanup, METH_VARARGS}, + { NULL, NULL } /* sentinel */ +}; + + +/* + These are offsets into the Apache server_rec structure. + They are accessed via getset functions. Note that the types + specified here are irrelevant if a function other than + getreq_recmbr() is used. E.g. bytes_sent is a long long, + and is retrieved via getreq_recmbr_off() which ignores what's + here. +*/ + +#define OFF(x) offsetof(server_rec, x) + +static PyMemberDef server_rec_mbrs[] = { + {"defn_name", T_STRING, OFF(defn_name)}, + {"defn_line_number", T_INT, OFF(defn_line_number)}, + {"server_admin", T_STRING, OFF(server_admin)}, + {"server_hostname", T_STRING, OFF(server_hostname)}, + {"port", T_SHORT, OFF(port)}, + {"error_fname", T_STRING, OFF(error_fname)}, +#if AP_MODULE_MAGIC_AT_LEAST(20100606,0) + {"loglevel", T_INT, OFF(log.level)}, +#else + {"loglevel", T_INT, OFF(loglevel)}, +#endif + {"is_virtual", T_INT, OFF(is_virtual)}, + /* XXX implement module_config ? */ + /* XXX implement lookup_defaults ? */ + /* XXX implement server_addr_rec ? */ + {"timeout", T_LONG, OFF(timeout)}, + {"keep_alive_timeout", T_LONG, OFF(keep_alive_timeout)}, + {"keep_alive_max", T_INT, OFF(keep_alive_max)}, + {"keep_alive", T_INT, OFF(keep_alive)}, + /* XXX send_buffer_size gone. where? document */ + /*{"send_buffer_size", T_INT, OFF(send_buffer_size), READONLY},*/ + {"path", T_STRING, OFF(path)}, + {"pathlen", T_INT, OFF(pathlen)}, + {"names", T_OBJECT, OFF(names)}, + {"wild_names", T_OBJECT, OFF(wild_names)}, + /* XXX server_uid and server_gid seem gone. Where? Document. */ + /*{"server_uid", T_INT, OFF(server_uid), READONLY},*/ + /*{"server_gid", T_INT, OFF(server_gid), READONLY},*/ + /* XXX Document limit* below. Make RW? 
*/ + {"limit_req_line", T_INT, OFF(limit_req_line)}, + {"limit_req_fieldsize", T_INT, OFF(limit_req_fieldsize)}, + {"limit_req_fields", T_INT, OFF(limit_req_fields)}, + {NULL} /* Sentinel */ +}; + +/** + ** getsrv_recmbr + ** + * Retrieves server_rec structure members + */ + +static PyObject *getsrv_recmbr(serverobject *self, void *name) +{ + PyMemberDef *md; + if (strcmp(name, "_server_rec") == 0) { +#if PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION < 7 + return PyCObject_FromVoidPtr(self->server, 0); +#else + return PyCapsule_New((void *)self->server, NULL, NULL); +#endif + + } + + md = find_memberdef(server_rec_mbrs, name); + if (!md) { + PyErr_SetString(PyExc_AttributeError, name); + return NULL; + } + return PyMember_GetOne((char*)self->server, md); +} + +/* we don't need setsrv_recmbr for now */ + +/** + ** getsrv_recmbr_time + ** + * Retrieves apr_time_t server_rec members + */ + +static PyObject *getsrv_recmbr_time(serverobject *self, void *name) +{ + PyMemberDef *md = find_memberdef(server_rec_mbrs, name); + char *addr = (char *)self->server + md->offset; + apr_time_t time = *(apr_time_t*)addr; + return PyFloat_FromDouble(time*0.000001); +} + +/** + ** getsrv_rec_ah + ** + * For array headers that will get converted to tuple + */ + +static PyObject *getsrv_recmbr_ah(serverobject *self, void *name) +{ + const PyMemberDef *md = find_memberdef(server_rec_mbrs, name); + apr_array_header_t *ah = + *(apr_array_header_t **)((char *)self->server + md->offset); + + return tuple_from_array_header(ah); +} + +/** + ** getmakeobj + ** + * A getter func that creates an object as needed. 
+ */ + +static PyObject *getmakeobj(serverobject* self, void *objname) +{ + char *name = (char *)objname; + PyObject *result = NULL; + + if (strcmp(name, "next") == 0) { + if (!self->next && self->server->next) + self->next = MpServer_FromServer(self->server->next); + result = self->next; + } + + if (!result) + result = Py_None; + + Py_INCREF(result); + return result; +} + +static PyObject *my_generation(serverobject *self, void *objname) +{ + int mpm_generation = 0; +#if defined(AP_MPMQ_GENERATION) + ap_mpm_query(AP_MPMQ_GENERATION, &mpm_generation); +#else + mpm_generation = ap_my_generation; +#endif + return PyLong_FromLong((long)mpm_generation); +} + +static PyObject *restart_time(serverobject *self, void *objname) +{ + return PyFloat_FromDouble(ap_scoreboard_image->global->restart_time*0.000001); +} + +static PyGetSetDef server_getsets[] = { + /* XXX process */ + {"next", (getter)getmakeobj, NULL, "The next server in the list", "next"}, + {"defn_name", (getter)getsrv_recmbr, NULL, "The name of the server", "defn_name"}, + {"defn_line_number", (getter)getsrv_recmbr, NULL, + "The line of the config file that the server was defined on", "defn_line_number"}, + {"server_admin", (getter)getsrv_recmbr, NULL, "The admin's contact information", "server_admin"}, + {"server_hostname", (getter)getsrv_recmbr, NULL, "The server hostname", "server_hostname"}, + {"port", (getter)getsrv_recmbr, NULL, " for redirects, etc.", "port"}, + {"error_fname", (getter)getsrv_recmbr, NULL, "The name of the error log", "error_fname"}, + /* XXX error_log apr_file_t */ + {"loglevel", (getter)getsrv_recmbr, NULL, "The log level for this server", "loglevel"}, + {"is_virtual", (getter)getsrv_recmbr, NULL, "true if this is the virtual server", "is_virtual"}, + {"timeout", (getter)getsrv_recmbr_time, NULL, "Timeout, as interval, before we give up", "timeout"}, + {"keep_alive_timeout", (getter)getsrv_recmbr_time, NULL, "The apr interval we will wait for another request", "keep_alive_timeout"}, + 
{"keep_alive_max", (getter)getsrv_recmbr, NULL, "Maximum requests per connection", "keep_alive_max"}, + {"keep_alive", (getter)getsrv_recmbr, NULL, "Use persistent connections?", "keep_alive"}, + {"path", (getter)getsrv_recmbr, NULL, "Pathname for ServerPath", "path"}, + {"pathlen", (getter)getsrv_recmbr, NULL, "Length of path", "pathlen"}, + {"names", (getter)getsrv_recmbr_ah, NULL, "Normal names for ServerAlias servers", "names"}, + {"wild_names", (getter)getsrv_recmbr_ah, NULL, "Wildcarded names for ServerAlias servers", "wild_names"}, + {"limit_req_line", (getter)getsrv_recmbr, NULL, "limit on size of the HTTP request line", "limit_req_line"}, + {"limit_req_fieldsize", (getter)getsrv_recmbr, NULL, "limit on size of any request header field", "limit_req_fieldsize"}, + {"limit_req_fields", (getter)getsrv_recmbr, NULL, "limit on number of request header fields", "limit_req_fields"}, + {"my_generation", (getter)my_generation, NULL, "Generation of this child", "my_generation"}, + {"restart_time", (getter)restart_time, NULL, "Server restart time", "restart_time"}, + {"_server_rec", (getter)getsrv_recmbr, NULL, "Actual server_rec struct", "_server_rec"}, + {NULL} /* Sentinel */ +}; + + +/** + ** server_dealloc + ** + * + */ + +static void server_dealloc(void *o) +{ + serverobject *self = (serverobject *)o; + Py_XDECREF(self->dict); + Py_XDECREF(self->next); + PyObject_Del(self); +} + +static char server_doc[] = +"Apache server_rec structure\n"; + +PyTypeObject MpServer_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "mp_server", /* tp_name */ + sizeof(serverobject), /* tp_basicsize */ + 0, /* tp_itemsize */ + (destructor) server_dealloc, /* tp_dealloc*/ + 0, /* tp_print*/ + 0, /* tp_getattr*/ + 0, /* tp_setattr*/ + 0, /* tp_compare*/ + 0, /* tp_repr*/ + 0, /* tp_as_number*/ + 0, /* tp_as_sequence*/ + 0, /* tp_as_mapping*/ + 0, /* tp_hash*/ + 0, /* tp_call */ + 0, /* tp_str */ + PyObject_GenericGetAttr, /* tp_getattro */ + PyObject_GenericSetAttr, /* tp_setattro */ + 0, /* 
tp_as_buffer */ + Py_TPFLAGS_DEFAULT | + Py_TPFLAGS_BASETYPE, /* tp_flags */ + server_doc, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + server_methods, /* tp_methods */ + 0, /* tp_members */ + server_getsets, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + offsetof(serverobject, dict), /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + server_dealloc, /* tp_free */ +}; + + + + + diff --git a/src/tableobject.c b/src/tableobject.c new file mode 100644 index 0000000..ee12bc8 --- /dev/null +++ b/src/tableobject.c @@ -0,0 +1,1373 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. + * + * + * tableobject.c + * + * + */ + +#include "mod_python.h" + +/** XXX this is a hack. because apr_table_t + is not available in a header file */ +#define TABLE_HASH_SIZE 32 +struct apr_table_t { + apr_array_header_t a; +#ifdef MAKE_TABLE_PROFILE + void *creator; +#endif + apr_uint32_t index_initialized; + int index_first[TABLE_HASH_SIZE]; + int index_last[TABLE_HASH_SIZE]; +}; + +/** + ** MpTable_FromTable + ** + * This routine creates a Python tableobject given an Apache + * table pointer. 
+ * + */ + +PyObject * MpTable_FromTable(apr_table_t *t) +{ + tableobject *result; + + TABLE_DEBUG("MpTable_FromTable"); + MpTable_Type.ob_type = &PyType_Type; + result = PyObject_New(tableobject, &MpTable_Type); + if (! result) + return PyErr_NoMemory(); + + result->table = t; + result->pool = NULL; + + return (PyObject *)result; +} + +/** + ** MpTable_New + ** + * This returns a new object of built-in type table. + * + * NOTE: The ap_table gets greated in its own pool, which lives + * throught the live of the tableobject. This is because this + * object may persist from hit to hit. + * + * ALSO NOTE: table_new() also creates tables, independent of this + * (it gets called when table is being subclassed) + * + */ + +PyObject * MpTable_New() +{ + tableobject *t; + apr_pool_t *p; + + TABLE_DEBUG("MpTable_New"); + + /* XXX need second arg abort function to report mem error */ + apr_pool_create_ex(&p, NULL, NULL, NULL); + + /* two is a wild guess */ + t = (tableobject *)MpTable_FromTable(apr_table_make(p, 2)); + + /* remember the pointer to our own pool */ + t->pool = p; + + return (PyObject *)t; + +} + +/** + ** table_dealloc + ** + * Frees table's memory + */ + +static void table_dealloc(register void *o) +{ + tableobject *self = (tableobject *)o; + + TABLE_DEBUG("table_dealloc"); + + if (MpTable_Check(self)) { + if (self->pool) + apr_pool_destroy(self->pool); + PyObject_Del(self); + } + else + Py_TYPE(self)->tp_free((PyObject *)self); + +} + +/** + ** table_print + ** + * prints table like a dictionary + * (Useful when debugging) + */ + +static int table_print(register tableobject *self, register FILE *fp, register int flags) +{ + const apr_array_header_t *ah = NULL; + apr_table_entry_t *elts; + register int i; + + TABLE_DEBUG("table_print"); + + fprintf(fp, "{"); + + ah = apr_table_elts(self->table); + elts = (apr_table_entry_t *) ah->elts; + + i = ah->nelts; + if (i == 0) { + fprintf(fp, "}"); + return 0; + } + + while (i--) + if (elts[i].key) + { + fprintf(fp, 
"'%s': '%s'", elts[i].key, elts[i].val); + + if (i > 0) + fprintf(fp, ", "); + else + fprintf(fp, "}"); + } + + return 0; +} + +/** + ** table_repr + ** + * repr table like a dictionary + */ + +static PyObject * table_repr(tableobject *self) +{ + PyObject *s; + PyObject *t = NULL; + const apr_array_header_t *ah; + apr_table_entry_t *elts; + int i; + + TABLE_DEBUG("table_repr"); + + s = PyBytes_FromString("{"); + + ah = apr_table_elts (self->table); + elts = (apr_table_entry_t *) ah->elts; + + i = ah->nelts; + if (i == 0) + PyBytes_ConcatAndDel(&s, PyBytes_FromString("}")); + + while (i--) + if (elts[i].key) + { + t = PyBytes_FromString(elts[i].key); + PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t));; + Py_XDECREF(t); + + PyBytes_ConcatAndDel(&s, PyBytes_FromString(": ")); + + if (elts[i].val) { + t = PyBytes_FromString(elts[i].val); + } else { + t = Py_None; + Py_INCREF(t); + } + + PyBytes_ConcatAndDel(&s, MpObject_ReprAsBytes(t)); + Py_XDECREF(t); + + if (i > 0) + PyBytes_ConcatAndDel(&s, PyBytes_FromString(", ")); + else + PyBytes_ConcatAndDel(&s, PyBytes_FromString("}")); + } + +#if PY_MAJOR_VERSION < 3 + return s; +#else + { + PyObject *str = PyUnicode_FromString(PyBytes_AS_STRING(s)); + Py_DECREF(s); + return str; + } +#endif +} + +/** + ** tablelength + ** + * Number of elements in a table. Called + * when you do len(table) in Python. 
+ */ + +static Py_ssize_t tablelength(PyObject *self) +{ + TABLE_DEBUG("tablelength"); + + return apr_table_elts(((tableobject *)self)->table)->nelts; +} + +/** + ** table_subscript + ** + * Gets a dictionary item + */ + +static PyObject * table_subscript(PyObject *self, register PyObject *key) +{ + char *k; + const apr_array_header_t *ah; + apr_table_entry_t *elts; + register int i; + PyObject *list; + + TABLE_DEBUG("table_subscript"); + + MP_ANYSTR_AS_STR(k, key, 1); + if (!k) { + Py_DECREF(key); + return NULL; + } + + /* it's possible that we have duplicate keys, so + we can't simply use apr_table_get since that just + returns the first match. + */ + + list = PyList_New(0); + if (!list) + return NULL; + + ah = apr_table_elts (((tableobject *)self)->table); + elts = (apr_table_entry_t *) ah->elts; + + i = ah->nelts; + + while (i--) + if (elts[i].key) { + if (apr_strnatcasecmp(elts[i].key, k) == 0) { + PyObject *v = NULL; + if (elts[i].val != NULL) + v = MpBytesOrUnicode_FromString(elts[i].val); + else { + v = Py_None; + Py_INCREF(v); + } + PyList_Insert(list, 0, v); + Py_DECREF(v); + } + } + + Py_DECREF(key); /* becasue of MP_ANYSTR_AS_STR */ + + /* if no match */ + if (PyList_Size(list) == 0) { + Py_DECREF(list); + PyErr_SetObject(PyExc_KeyError, key); + return NULL; + } + + /* if we got one match */ + if (PyList_Size(list) == 1) { + PyObject *v = PyList_GetItem(list, 0); + Py_INCREF(v); + Py_DECREF(list); + return v; + } + + /* else we return a list */ + return list; +} + +/** + ** table_ass_subscript + ** + * insert into table dictionary-style + * *** NOTE *** + * Since the underlying ap_table_set makes a *copy* of the string, + * there is no need to increment the reference to the Python + * string passed in. 
+ */ + +static int table_ass_subscript(PyObject *self, PyObject *key, + PyObject *val) +{ + char *k, *v; + + TABLE_DEBUG("table_ass_subscript"); + + MP_ANYSTR_AS_STR(k, key, 1); + if (!k) { + Py_XDECREF(key); /* MP_ANYSTR_AS_STR */ + return -1; + } + + if (val == NULL) + apr_table_unset(((tableobject *)self)->table, k); + else { + MP_ANYSTR_AS_STR(v, val, 1); + if (!v) { + Py_XDECREF(key); /* MP_ANYSTR_AS_STR */ + Py_XDECREF(val); /* MP_ANYSTR_AS_STR */ + return -1; + } + apr_table_set(((tableobject *)self)->table, k, v); + } + Py_XDECREF(key); /* MP_ANYSTR_AS_STR */ + Py_XDECREF(val); /* MP_ANYSTR_AS_STR */ + return 0; +} + +/* table as mapping */ + +static PyMappingMethods table_as_mapping = { + tablelength, /*mp_length*/ + table_subscript, /*mp_subscript*/ + table_ass_subscript, /*mp_ass_subscript*/ +}; + +/** + ** table_keys + ** + * + * Implements dictionary's keys() method. + */ + +static PyObject * table_keys(register tableobject *self) +{ + PyObject *v; + const apr_array_header_t *ah; + apr_table_entry_t *elts; + int i, j; + + TABLE_DEBUG("table_keys"); + + ah = apr_table_elts(self->table); + elts = (apr_table_entry_t *) ah->elts; + + v = PyList_New(ah->nelts); + + for (i = 0, j = 0; i < ah->nelts; i++) + { + if (elts[i].key) + { + PyObject *key = MpBytesOrUnicode_FromString(elts[i].key); + PyList_SetItem(v, j, key); + j++; + } + } + return v; +} + +/** + ** table_values + ** + * + * Implements dictionary's values() method. 
+ */ + +static PyObject * table_values(register tableobject *self) +{ + + PyObject *v; + const apr_array_header_t *ah; + apr_table_entry_t *elts; + int i, j; + + TABLE_DEBUG("table_values"); + + ah = apr_table_elts(self->table); + elts = (apr_table_entry_t *) ah->elts; + v = PyList_New(ah->nelts); + + for (i = 0, j = 0; i < ah->nelts; i++) + { + if (elts[i].key) + { + PyObject *val = NULL; + if (elts[i].val != NULL) + val = MpBytesOrUnicode_FromString(elts[i].val); + else { + val = Py_None; + Py_INCREF(val); + } + PyList_SetItem(v, j, val); + j++; + } + } + return v; +} + +/** + ** table_items + ** + * + * Implements dictionary's items() method. + */ + +static PyObject * table_items(register tableobject *self) +{ + + PyObject *v; + const apr_array_header_t *ah; + apr_table_entry_t *elts; + int i, j; + + TABLE_DEBUG("table_items"); + + ah = apr_table_elts(self->table); + elts = (apr_table_entry_t *) ah->elts; + v = PyList_New(ah->nelts); + + for (i = 0, j = 0; i < ah->nelts; i++) + { + if (elts[i].key) + { + PyObject *keyval = Py_BuildValue("(s,s)", elts[i].key, elts[i].val); + PyList_SetItem(v, j, keyval); + j++; + } + } + return v; +} + +/** + ** table_merge + ** + * Since tables can only store strings, key/vals from + * mapping object b will be str()ingized. 
+ */ + +static int table_merge(tableobject *a, PyObject *b, int override) +{ + /* Do it the generic, slower way */ + PyObject *keys = PyMapping_Keys(b); + PyObject *iter; + PyObject *key, *value, *skey, *svalue; + int status; + + TABLE_DEBUG("table_merge"); + + if (keys == NULL) { + TABLE_DEBUG(" table_merge: keys NULL"); + return -1; + } + + iter = PyObject_GetIter(keys); + Py_DECREF(keys); + if (iter == NULL) { + TABLE_DEBUG(" table_merge: iter NULL"); + return -1; + } + + for (key = PyIter_Next(iter); key; key = PyIter_Next(iter)) { + char *c_skey; + + skey = PyObject_Str(key); + if (skey == NULL) { + Py_DECREF(iter); + Py_DECREF(key); + TABLE_DEBUG(" table_merge: skey NULL"); + return -1; + } + MP_ANYSTR_AS_STR(c_skey, skey, 0); + if (!c_skey) { + Py_DECREF(key); + Py_DECREF(skey); + TABLE_DEBUG(" table_merge: c_skey NULL"); + return -1; + } + if (!override && apr_table_get(a->table, c_skey) != NULL) { + Py_DECREF(key); + Py_DECREF(skey); + continue; + } + + value = PyObject_GetItem(b, key); + if (value == NULL) { + Py_DECREF(iter); + Py_DECREF(key); + Py_DECREF(skey); + TABLE_DEBUG(" table_merge: value NULL"); + return -1; + } + svalue = PyObject_Str(value); + if (svalue == NULL) { + Py_DECREF(iter); + Py_DECREF(key); + Py_DECREF(skey); + Py_DECREF(value); + TABLE_DEBUG(" table_merge: svalue NULL"); + return -1; + } + status = table_ass_subscript((PyObject *)a, skey, svalue); + Py_DECREF(key); + Py_DECREF(value); + Py_DECREF(skey); + Py_DECREF(svalue); + if (status < 0) { + Py_DECREF(iter); + TABLE_DEBUG(" table_merge: status < 0"); + return -1; + } + } + Py_DECREF(iter); + if (PyErr_Occurred()) { + /* Iterator completed, via error */ + TABLE_DEBUG(" table_merge: PyErr_Occurred()"); + return -1; + } + + return 0; +} + +/** + ** table_update + ** + */ + +static PyObject *table_update(tableobject *self, PyObject *other) +{ + TABLE_DEBUG("table_update"); + + if (table_merge(self, other, 1) < 0) + return NULL; + + Py_INCREF(Py_None); + return Py_None; +} + +/** + 
** table_mergefromseq2 + ** + * Similar to PyDict_MergeFromSeq2 (code borrowed from there). + */ + +static int table_mergefromseq2(tableobject *self, PyObject *seq2, int override) +{ + PyObject *it; /* iter(seq2) */ + int i; /* index into seq2 of current element */ + PyObject *item; /* seq2[i] */ + PyObject *fast; /* item as a 2-tuple or 2-list */ + + TABLE_DEBUG("table_mergefromseq2"); + + it = PyObject_GetIter(seq2); + if (it == NULL) + return -1; + + for (i = 0; ; ++i) { + PyObject *key, *value, *skey, *svalue; + char *c_skey; + int n; + + fast = NULL; + item = PyIter_Next(it); + if (item == NULL) { + if (PyErr_Occurred()) + goto Fail; + break; + } + + /* Convert item to sequence, and verify length 2. */ + fast = PySequence_Fast(item, ""); + if (fast == NULL) { + if (PyErr_ExceptionMatches(PyExc_TypeError)) + PyErr_Format(PyExc_TypeError, + "cannot convert table update " + "sequence element #%d to a sequence", + i); + goto Fail; + } + n = PySequence_Fast_GET_SIZE(fast); + if (n != 2) { + PyErr_Format(PyExc_ValueError, + "table update sequence element #%d " + "has length %d; 2 is required", + i, n); + goto Fail; + } + + /* Update/merge with this (key, value) pair. 
*/ + key = PySequence_Fast_GET_ITEM(fast, 0); + value = PySequence_Fast_GET_ITEM(fast, 1); + skey = PyObject_Str(key); + if (skey == NULL) + goto Fail; + svalue = PyObject_Str(value); + if (svalue == NULL) { + Py_DECREF(svalue); + goto Fail; + } + + MP_ANYSTR_AS_STR(c_skey, skey, 0); + if (!c_skey) { + Py_DECREF(skey); + Py_DECREF(svalue); + goto Fail; + } + + if (override || apr_table_get(self->table, c_skey) == NULL) { + int status = table_ass_subscript((PyObject *)self, skey, svalue); + if (status < 0) { + Py_DECREF(skey); + Py_DECREF(svalue); + goto Fail; + } + } + + Py_DECREF(skey); + Py_DECREF(svalue); + Py_DECREF(fast); + Py_DECREF(item); + } + + i = 0; + goto Return; + Fail: + Py_XDECREF(item); + Py_XDECREF(fast); + i = -1; + Return: + Py_DECREF(it); + return i; +} + +/** + ** table_copy + ** + */ + +static PyObject *table_copy(register tableobject *from) +{ + tableobject *to = (tableobject *)MpTable_New(); + + TABLE_DEBUG("table_copy"); + + if (to != NULL) + apr_table_overlap(to->table, from->table, 0); + + return (PyObject*)to; +} + +#if PY_MAJOR_VERSION < 3 +/** + ** table_compare + ** + */ + +static int table_compare(tableobject *a, tableobject *b) +{ + /* + we're so lazy that we just copy tables to dicts + and rely on dict's compare ability. 
this is not + the best way to do this to say the least + */ + + PyObject *ad, *bd; + int result; + + TABLE_DEBUG("table_compare"); + + ad = PyDict_New(); + bd = PyDict_New(); + + PyDict_Merge(ad, (PyObject*)a, 0); + PyDict_Merge(bd, (PyObject*)b, 0); + + result = PyObject_Compare(ad, bd); + + Py_DECREF(ad); + Py_DECREF(bd); + + return result; +} +#endif + +/** + ** table_richcompare + ** + */ + +static PyObject *table_richcompare(PyObject *a, PyObject *b, int op) +{ + + PyObject *ad, *bd, *result; + + TABLE_DEBUG("table_richcompare"); + + ad = PyDict_New(); + bd = PyDict_New(); + + PyDict_Merge(ad, (PyObject*)a, 0); + PyDict_Merge(bd, (PyObject*)b, 0); + + result = PyObject_RichCompare(ad, bd, op); + + Py_DECREF(ad); + Py_DECREF(bd); + + return result; + +} + +/** + ** table_has_key + ** + */ + +static PyObject * table_has_key(tableobject *self, PyObject *key) +{ + + const char *k; + + TABLE_DEBUG("table_has_key"); + + MP_ANYSTR_AS_STR(k, key, 1); + if (!k) { + Py_DECREF(key); /* MP_ANYSTR_AS_STR */ + return NULL; + } + + if (apr_table_get(self->table, k)) + return PyLong_FromLong(1); + else + return PyLong_FromLong(0); +} + +/** + ** table_get + ** + * implements get([failobj]) method + * (only returns the first match) + */ + +static PyObject *table_get(register tableobject *self, PyObject *args) +{ + PyObject *key; + PyObject *failobj = Py_None; + PyObject *val = NULL; + const char *k, *v; + + TABLE_DEBUG("table_get"); + + if (!PyArg_ParseTuple(args, "O|O:get", &key, &failobj)) + return NULL; + + MP_ANYSTR_AS_STR(k, key, 1); + if (!k) { + Py_DECREF(key); /* MP_ANYSTR_AS_STR */ + return NULL; + } + + v = apr_table_get(self->table, k); + if (!v) { + val = failobj; + Py_INCREF(val); + } + else + val = MpBytesOrUnicode_FromString(v); + + Py_DECREF(key); /* MP_ANYSTR_AS_STR */ + return val; +} + +/** + ** table_setdefault + ** + * implements setdefault(key, [val]) method + */ + +static PyObject *table_setdefault(register tableobject *self, PyObject *args) +{ + int 
len; + PyObject *failobj = NULL; + PyObject *key, *val = NULL; + char *k = NULL, *f = NULL; + const char *v = NULL; + + TABLE_DEBUG("table_setdefault"); + + if (!PyArg_ParseTuple(args, "O|O:setdefault", &key, &failobj)) + return NULL; + + MP_ANYSTR_AS_STR(k, key, 1); + if (!k) { + Py_DECREF(key); /* MP_ANYSTR_AS_STR */ + return NULL; + } + + if (failobj) { + MP_ANYSTR_AS_STR(f, failobj, 1); + if (!f) { + Py_DECREF(failobj); /* MP_ANYSTR_AS_ATR */ + return NULL; + } + } + + v = apr_table_get(self->table, k); + if (!v) { + if (f) { + apr_table_set(self->table, k, f); + val = failobj; + Py_INCREF(val); + } + else { + apr_table_set(self->table, k, ""); + v = ""; + } + } + + val = MpBytesOrUnicode_FromString(v); + + Py_XDECREF(failobj); /* MP_ANYSTR_AS_ATR */ + return val; +} + +/** + ** table_clear + ** + */ + +static PyObject *table_clear(register tableobject *self) +{ + TABLE_DEBUG("table_clear"); + + apr_table_clear(self->table); + + Py_INCREF(Py_None); + return Py_None; +} + +/** + ** table_popitem + ** + */ + +static PyObject *table_popitem(tableobject *self) +{ + apr_array_header_t *ah; + apr_table_entry_t *elts; + PyObject *res; + + TABLE_DEBUG("table_popitem"); + + ah = (apr_array_header_t *) apr_table_elts(self->table); + elts = (apr_table_entry_t *) ah->elts; + + if (ah->nelts == 0) { + PyErr_SetString(PyExc_KeyError, + "popitem(): table is empty"); + return NULL; + } + + res = Py_BuildValue("(s,s)", elts[0].key, elts[0].val); + ah->nelts--; + elts++; + + return res; +} + +/** + ** table_traverse + ** + */ + +static int table_traverse(tableobject *self, visitproc visit, void *arg) +{ + const apr_array_header_t *ah; + apr_table_entry_t *elts; + register int i; + + TABLE_DEBUG("table_traverse"); + + ah = apr_table_elts (self->table); + elts = (apr_table_entry_t *) ah->elts; + + i = ah->nelts; + + while (i--) + if (elts[i].key) { + int err; + + PyObject *v = NULL; + if (elts[i].val != NULL) + v = MpBytesOrUnicode_FromString(elts[i].val); + else { + v = Py_None; 
+ Py_INCREF(v); + } + + err = visit(v, arg); + Py_XDECREF(v); + if (err) + return err; + } + + return 0; +} + +/** + ** table_tp_clear + ** + */ + +static int table_tp_clear(tableobject *self) +{ + TABLE_DEBUG("table_tp_clear"); + + table_clear(self); + return 0; +} + +/** + ** mp_table_add + ** + * this function is equivalent of ap_table_add - + * it can create duplicate entries. + */ + +static PyObject * mp_table_add(tableobject *self, PyObject *args) +{ + PyObject *key, *val; + const char *k, *v; + + TABLE_DEBUG("mp_table_add"); + + if (! PyArg_ParseTuple(args, "OO", &key, &val)) + return NULL; + + MP_ANYSTR_AS_STR(k, key, 1); + MP_ANYSTR_AS_STR(v, val, 1); + if ((!k) || (!v)) { + Py_DECREF(key); /* MP_ANYSTR_AS_STR */ + Py_DECREF(val); /* MP_ANYSTR_AS_STR */ + return NULL; + } + + apr_table_add(self->table, k, v); + + Py_DECREF(key); /* MP_ANYSTR_AS_STR */ + Py_DECREF(val); /* MP_ANYSTR_AS_STR */ + + Py_INCREF(Py_None); + return Py_None; +} + +typedef PyObject * (*tableselectfunc)(apr_table_entry_t *); + +static PyObject *tableiter_new(tableobject *, tableselectfunc); + +static PyObject *select_key(apr_table_entry_t *elts) +{ + return MpBytesOrUnicode_FromString(elts->key); +} + +static PyObject *select_value(apr_table_entry_t *elts) +{ + PyObject *val = NULL; + + TABLE_DEBUG("select_value"); + + if (elts->val != NULL) + val = MpBytesOrUnicode_FromString(elts->val); + else { + val = Py_None; + Py_INCREF(val); + } + + return val; +} + +static PyObject *select_item(apr_table_entry_t *elts) +{ + TABLE_DEBUG("select_item"); + + return Py_BuildValue("(s,s)", elts->key, elts->val); +} + +static PyObject *table_iterkeys(tableobject *self) +{ + TABLE_DEBUG("table_iterkeys"); + + return tableiter_new(self, select_key); +} + +static PyObject *table_itervalues(tableobject *self) +{ + TABLE_DEBUG("table_itervalues"); + + return tableiter_new(self, select_value); +} + +static PyObject *table_iteritems(tableobject *self) +{ + TABLE_DEBUG("table_iteritems"); + + return 
tableiter_new(self, select_item); +} + +static char has_key__doc__[] = +"T.has_key(k) -> 1 if T has a key k, else 0"; + +static char get__doc__[] = +"T.get(k[,d]) -> T[k] if T.has_key(k), else d. d defaults to None."; + +static char setdefault_doc__[] = +"T.setdefault(k[,d]) -> T.get(k,d), also set T[k]=d if not T.has_key(k)"; + +static char popitem__doc__[] = +"T.popitem() -> (k, v), remove and return some (key, value) pair as a\n\ +2-tuple; but raise KeyError if T is empty"; + +static char keys__doc__[] = +"T.keys() -> list of T's keys"; + +static char items__doc__[] = +"T.items() -> list of T's (key, value) pairs, as 2-tuples"; + +static char values__doc__[] = +"T.values() -> list of T's values"; + +static char update__doc__[] = +"T.update(E) -> None. Update T from E: for k in E.keys(): T[k] = E[k]"; + +static char clear__doc__[] = +"T.clear() -> None. Remove all items from T."; + +static char copy__doc__[] = +"T.copy() -> a shallow copy of T"; + +static char iterkeys__doc__[] = +"T.iterkeys() -> an iterator over the keys of T"; + +static char itervalues__doc__[] = +"T.itervalues() -> an iterator over the values of T"; + +static char iteritems__doc__[] = +"T.iteritems() -> an iterator over the (key, value) items of T"; + +static char add__doc__[] = +"T.add(k, v) -> add (as oppsed to replace) a key k and value v"; + +/* table method definitions */ +static PyMethodDef mp_table_methods[] = { + {"has_key", (PyCFunction)table_has_key, METH_O, has_key__doc__}, + {"get", (PyCFunction)table_get, METH_VARARGS, get__doc__}, + {"setdefault", (PyCFunction)table_setdefault, METH_VARARGS, setdefault_doc__}, + {"popitem", (PyCFunction)table_popitem, METH_NOARGS, popitem__doc__}, + {"keys", (PyCFunction)table_keys, METH_NOARGS, keys__doc__}, + {"items", (PyCFunction)table_items, METH_NOARGS, items__doc__}, + {"values", (PyCFunction)table_values, METH_NOARGS, values__doc__}, + {"update", (PyCFunction)table_update, METH_O, update__doc__}, + {"clear", (PyCFunction)table_clear, 
METH_NOARGS, clear__doc__}, + {"copy", (PyCFunction)table_copy, METH_NOARGS, copy__doc__}, + {"iterkeys", (PyCFunction)table_iterkeys, METH_NOARGS, iterkeys__doc__}, + {"itervalues", (PyCFunction)table_itervalues, METH_NOARGS, itervalues__doc__}, + {"iteritems", (PyCFunction)table_iteritems, METH_NOARGS, iteritems__doc__}, + {"add", (PyCFunction)mp_table_add, METH_VARARGS, add__doc__}, + {NULL, NULL} /* sentinel */ +}; + +static int table_contains(tableobject *self, PyObject *key) +{ + char *k; + const char *v; + int rc; + + TABLE_DEBUG("table_contains"); + + MP_ANYSTR_AS_STR(k, key, 1); + if (!k) { + Py_DECREF(key); + return -1; + } + v = apr_table_get(self->table, k); + Py_DECREF(key); + return (v != NULL); +} + +static PySequenceMethods table_as_sequence = { + 0, /* sq_length */ + 0, /* sq_concat */ + 0, /* sq_repeat */ + 0, /* sq_item */ + 0, /* sq_slice */ + 0, /* sq_ass_item */ + 0, /* sq_ass_slice */ + (objobjproc)table_contains, /* sq_contains */ + 0, /* sq_inplace_concat */ + 0, /* sq_inplace_repeat */ +}; + +/** + ** table_new + ** + */ + +static PyObject *table_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + TABLE_DEBUG("table_new"); + + return MpTable_New(); +} + +static int table_init(tableobject *self, PyObject *args, PyObject *kwds) +{ + PyObject *arg = NULL; + static char *kwlist[] = {"items", 0}; + int result = 0; + + TABLE_DEBUG("table_init"); + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O:mp_table", + kwlist, &arg)) + result = -1; + + else if (arg != NULL) { + if (PyObject_HasAttrString(arg, "keys")) + result = table_merge(self, arg, 1); + else + result = table_mergefromseq2(self, arg, 1); + } + return result; +} + +static long table_nohash(PyObject *self) +{ + TABLE_DEBUG("table_nohash"); + + PyErr_SetString(PyExc_TypeError, "mp_table objects are unhashable"); + return -1; +} + +static PyObject *table_iter(tableobject *self) +{ + TABLE_DEBUG("table_iter"); + + return tableiter_new(self, select_key); +} + +static char 
mp_table_doc[] = +"table() -> new empty table.\n" +"table(mapping) -> new table initialized from a mapping object's\n" +" (key, value) pairs.\n" +"table(seq) -> new table initialized as if via:\n" +" d = {}\n" +" for k, v in seq:\n" +" d[k] = v"; + +PyTypeObject MpTable_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "mp_table", /* tp_name */ + sizeof(tableobject), /* tp_basicsize */ + 0, /* tp_itemsize */ + (destructor)table_dealloc, /* tp_dealloc */ + (printfunc)table_print, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ +#if PY_MAJOR_VERSION < 3 + (cmpfunc)table_compare, /* tp_compare */ +#else + 0, /* tp_reserved */ +#endif + (reprfunc)table_repr, /* tp_repr */ + 0, /* tp_as_number */ + &table_as_sequence, /* tp_as_sequence */ + &table_as_mapping, /* tp_as_mapping */ + table_nohash, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + PyObject_GenericGetAttr, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT | + Py_TPFLAGS_BASETYPE, /* tp_flags */ + mp_table_doc, /* tp_doc */ + (traverseproc)table_traverse, /* tp_traverse */ + /* PYTHON 2.5: 'inquiry' should be perhaps replaced with 'lenfunc' */ + (inquiry)table_tp_clear, /* tp_clear */ + table_richcompare, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + (getiterfunc)table_iter, /* tp_iter */ + 0, /* tp_iternext */ + mp_table_methods, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)table_init, /* tp_init */ + 0, /* tp_alloc */ + table_new, /* tp_new */ + table_dealloc, /* tp_free */ +}; + +/* Table iterator type */ + +extern PyTypeObject MpTableIter_Type; /* Forward */ + +typedef struct { + PyObject_HEAD + tableobject *table; + int ti_nelts; + int ti_pos; + tableselectfunc ti_select; +} tableiterobject; + +static PyObject *tableiter_new(tableobject *table, tableselectfunc select) +{ + tableiterobject *ti; + + 
TABLE_DEBUG("tableiter_new"); + MpTableIter_Type.ob_type = &PyType_Type; + ti = PyObject_NEW(tableiterobject, &MpTableIter_Type); + if (ti == NULL) + return NULL; + Py_INCREF(table); + ti->table = table; + ti->ti_nelts = table->table->a.nelts; + ti->ti_pos = 0; + ti->ti_select = select; + return (PyObject *)ti; +} + +static void tableiter_dealloc(tableiterobject *ti) +{ + Py_DECREF(ti->table); + PyObject_DEL(ti); +} + +static PyObject *tableiter_next(tableiterobject *ti, PyObject *args) +{ + apr_table_entry_t *elts = (apr_table_entry_t *) ti->table->table->a.elts; + + TABLE_DEBUG("tableiter_next"); + + /* make sure the table hasn't change while being iterated */ + + if (ti->ti_nelts != ti->table->table->a.nelts) { + PyErr_SetString(PyExc_RuntimeError, + "table changed size during iteration"); + return NULL; + } + + /* return the next key/val */ + + if (ti->ti_pos < ti->table->table->a.nelts) { + return (*ti->ti_select)(&elts[ti->ti_pos++]); + } + + /* the end has been reached */ + + PyErr_SetObject(PyExc_StopIteration, Py_None); + return NULL; +} + +static PyObject *tableiter_getiter(PyObject *it) +{ + TABLE_DEBUG("tableiter_getiter"); + + Py_INCREF(it); + return it; +} + +static PyMethodDef tableiter_methods[] = { + {"next", (PyCFunction)tableiter_next, METH_VARARGS, + "it.next() -- get the next value, or raise StopIteration"}, + {NULL, NULL} /* sentinel */ +}; + +static PyObject *tableiter_iternext(tableiterobject *ti) +{ + apr_table_entry_t *elts = (apr_table_entry_t *) ti->table->table->a.elts; + + TABLE_DEBUG("tableiter_iternext"); + + /* make sure the table hasn't change while being iterated */ + + if (ti->ti_nelts != ti->table->table->a.nelts) { + PyErr_SetString(PyExc_RuntimeError, + "table changed size during iteration"); + return NULL; + } + + /* return the next key/val */ + + if (ti->ti_pos < ti->table->table->a.nelts) { + return (*ti->ti_select)(&elts[ti->ti_pos++]); + } + + /* the end has been reached */ + + PyErr_SetObject(PyExc_StopIteration, 
Py_None); + return NULL; + +} + +PyTypeObject MpTableIter_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "dictionary-iterator", /* tp_name */ + sizeof(tableiterobject), /* tp_basicsize */ + 0, /* tp_itemsize */ + /* methods */ + (destructor)tableiter_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + PyObject_GenericGetAttr, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + (getiterfunc)tableiter_getiter, /* tp_iter */ + (iternextfunc)tableiter_iternext, /* tp_iternext */ + tableiter_methods, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ +}; + + diff --git a/src/util.c b/src/util.c new file mode 100644 index 0000000..99e73a2 --- /dev/null +++ b/src/util.c @@ -0,0 +1,457 @@ +/* + * Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + * + * Originally developed by Gregory Trubetskoy. 
+ * + * + * util.c + * + * + * See accompanying documentation and source code comments + * for details. + * + */ + +#include "mod_python.h" + +/** + ** tuple_from_array_header + ** + * Given an array header return a tuple. The array elements + * assumed to be strings. + */ + +PyObject * tuple_from_array_header(const apr_array_header_t *ah) +{ + + PyObject *t; + int i; + char **s; + + if (ah == NULL) + t = PyTuple_New(0); + else { + t = PyTuple_New(ah->nelts); + + s = (char **) ah->elts; + for (i = 0; i < ah->nelts; i++) + PyTuple_SetItem(t, i, MpBytesOrUnicode_FromString(s[i])); + } + return t; +} + +/** + ** tuple_from_method_list + ** + * Given an apr_method_list_t return a tuple. + */ + +PyObject * tuple_from_method_list(const ap_method_list_t *l) +{ + + PyObject *t; + int i; + char **methods; + + if ((l->method_list == NULL) || (l->method_list->nelts == 0)) + t = PyTuple_New(0); + else { + t = PyTuple_New(l->method_list->nelts); + + methods = (char **)l->method_list->elts; + for (i = 0; i < l->method_list->nelts; ++i) + PyTuple_SetItem(t, i, MpBytesOrUnicode_FromString(methods[i])); + } + return t; +} + +/** + ** tuple_from_finfo + ** + * makes a tuple similar to return of os.stat() from apr_finfo_t + * + */ + +PyObject *tuple_from_finfo(apr_finfo_t *f) +{ + PyObject *t; + + if (f->filetype == APR_NOFILE) { + Py_INCREF(Py_None); + return Py_None; + } + + t = PyTuple_New(13); + + /* this should have been first, but was added later */ + PyTuple_SET_ITEM(t, 12, PyLong_FromLong(f->filetype)); + + if (f->valid & APR_FINFO_PROT) { + PyTuple_SET_ITEM(t, 0, PyLong_FromLong(f->protection)); + } else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 0, Py_None); + } + if (f->valid & APR_FINFO_INODE) { + PyTuple_SET_ITEM(t, 1, PyLong_FromLong(f->inode)); + } else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 1, Py_None); + } + if (f->valid & APR_FINFO_DEV) { + PyTuple_SET_ITEM(t, 2, PyLong_FromLong(f->device)); + } else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 2, 
Py_None); + } + if (f->valid & APR_FINFO_NLINK) { + PyTuple_SET_ITEM(t, 3, PyLong_FromLong(f->nlink)); + } else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 3, Py_None); + } + if (f->valid & APR_FINFO_USER) { + PyTuple_SET_ITEM(t, 4, PyLong_FromLong(f->user)); + } else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 4, Py_None); + } + if (f->valid & APR_FINFO_GROUP) { + PyTuple_SET_ITEM(t, 5, PyLong_FromLong(f->group)); + } else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 5, Py_None); + } + if (f->valid & APR_FINFO_SIZE) { + PyTuple_SET_ITEM(t, 6, PyLong_FromLong(f->size)); + } else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 6, Py_None); + } + if (f->valid & APR_FINFO_ATIME) { + PyTuple_SET_ITEM(t, 7, PyLong_FromLong(f->atime*0.000001)); + } else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 7, Py_None); + } + if (f->valid & APR_FINFO_MTIME) { + PyTuple_SET_ITEM(t, 8, PyLong_FromLong(f->mtime*0.000001)); + } else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 8, Py_None); + } + if (f->valid & APR_FINFO_CTIME) { + PyTuple_SET_ITEM(t, 9, PyLong_FromLong(f->ctime*0.000001)); + } else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 9, Py_None); + } + if (f->fname) { + PyTuple_SET_ITEM(t, 10, MpBytesOrUnicode_FromString(f->fname)); + } + else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 10, Py_None); + } + if (f->valid & APR_FINFO_NAME) { + PyTuple_SET_ITEM(t, 11, MpBytesOrUnicode_FromString(f->name)); + } else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 11, Py_None); + } + /* it'd be nice to also return the file dscriptor, + f->filehand->filedes, but it's platform dependent, + so may be later... 
*/ + + return t; +} + +/** + ** tuple_from_apr_uri + ** + * makes a tuple from uri_components + * + */ + +PyObject *tuple_from_apr_uri(apr_uri_t *u) +{ + PyObject *t; + + t = PyTuple_New(9); + + if (u->scheme) { + PyTuple_SET_ITEM(t, 0, MpBytesOrUnicode_FromString(u->scheme)); + } + else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 0, Py_None); + } + if (u->hostinfo) { + PyTuple_SET_ITEM(t, 1, MpBytesOrUnicode_FromString(u->hostinfo)); + } + else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 1, Py_None); + } + if (u->user) { + PyTuple_SET_ITEM(t, 2, MpBytesOrUnicode_FromString(u->user)); + } + else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 2, Py_None); + } + if (u->password) { + PyTuple_SET_ITEM(t, 3, MpBytesOrUnicode_FromString(u->password)); + } + else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 3, Py_None); + } + if (u->hostname) { + PyTuple_SET_ITEM(t, 4, MpBytesOrUnicode_FromString(u->hostname)); + } + else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 4, Py_None); + } + if (u->port_str) { + PyTuple_SET_ITEM(t, 5, PyLong_FromLong(u->port)); + } + else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 5, Py_None); + } + if (u->path) { + PyTuple_SET_ITEM(t, 6, MpBytesOrUnicode_FromString(u->path)); + } + else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 6, Py_None); + } + if (u->query) { + PyTuple_SET_ITEM(t, 7, MpBytesOrUnicode_FromString(u->query)); + } + else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 7, Py_None); + } + if (u->fragment) { + PyTuple_SET_ITEM(t, 8, MpBytesOrUnicode_FromString(u->fragment)); + } + else { + Py_INCREF(Py_None); + PyTuple_SET_ITEM(t, 8, Py_None); + } + /* XXX hostent, is_initialized, dns_* */ + + return t; +} + + +/** + ** python_decref + ** + * This helper function is used with apr_pool_cleanup_register to destroy + * python objects when a certain pool is destroyed. 
+ */ + +apr_status_t python_decref(void *object) +{ + Py_XDECREF((PyObject *) object); + return 0; +} + +/** + ** find_module + ** + * Find an Apache module by name, used by get_addhandler_extensions + */ + +static module *find_module(char *name) +{ + int n; + for (n = 0; ap_loaded_modules[n]; ++n) { + + if (strcmp(name, ap_loaded_modules[n]->name) == 0) + return ap_loaded_modules[n]; + + } + return NULL; +} + +/** + ** get_addhandler_extensions + ** + * Get extensions specified for AddHandler, if any. To do this we + * retrieve mod_mime's config. This is used by the publisher to strip + * file extentions from modules in the most meaningful way. + * + * XXX This function is a hack and will stop working if mod_mime people + * decide to change their code. A better way to implement this would + * be via the config tree, but it doesn't seem to be quite there just + * yet, because it doesn't have .htaccess directives. + */ + +char * get_addhandler_extensions(request_rec *req) +{ + + /* these typedefs are copied from mod_mime.c */ + + typedef struct { + apr_hash_t *extension_mappings; + apr_array_header_t *remove_mappings; + char *default_language; + int multimatch; + } mime_dir_config; + + typedef struct extension_info { + char *forced_type; /* Additional AddTyped stuff */ + char *encoding_type; /* Added with AddEncoding... */ + char *language_type; /* Added with AddLanguage... */ + char *handler; /* Added with AddHandler... */ + char *charset_type; /* Added with AddCharset... */ + char *input_filters; /* Added with AddInputFilter... */ + char *output_filters; /* Added with AddOutputFilter... 
*/ + } extension_info; + + mime_dir_config *mconf; + + apr_hash_index_t *hi; + void *val; + void *key; + extension_info *ei; + char *result = NULL; + + module *mod_mime = find_module("mod_mime.c"); + mconf = (mime_dir_config *) ap_get_module_config(req->per_dir_config, mod_mime); + + if (mconf->extension_mappings) { + + for (hi = apr_hash_first(req->pool, mconf->extension_mappings); hi; hi = apr_hash_next(hi)) { + apr_hash_this(hi, (const void **)&key, NULL, &val); + ei = (extension_info *)val; + if (ei->handler) + if (strcmp("mod_python", ei->handler) == 0 || + strcmp("python-program", ei->handler) == 0) + result = apr_pstrcat(req->pool, (char *)key, " ", result, NULL); + } + } + + return result; +} + +/** + ** find_memberdef + ** + * Find a memberdef in a PyMemberDef array + */ + +PyMemberDef *find_memberdef(const PyMemberDef *mlist, const char *name) +{ + const PyMemberDef *md; + + for (md = mlist; md->name != NULL; md++) + if (name[0] == md->name[0] && + strcmp(md->name+1, name+1) == 0) + return (PyMemberDef *)md; + + /* this should never happen or the mlist is screwed up */ + return NULL; +} + +/** + ** cfgtree_walk + ** + * walks ap_directive_t tree returning a list of + * tuples and lists + */ + +PyObject *cfgtree_walk(ap_directive_t *dir) +{ + + PyObject *list = PyList_New(0); + if (!list) + return PyErr_NoMemory(); + + while (dir) { + + PyObject *t = Py_BuildValue("(s, s)", dir->directive, dir->args); + if (!t) + return PyErr_NoMemory(); + + PyList_Append(list, t); + + Py_DECREF(t); + + if (dir->first_child) { + + PyObject *child = cfgtree_walk(dir->first_child); + if (!child) + return PyErr_NoMemory(); + + PyList_Append(list, child); + + Py_DECREF(child); + + } + + dir = dir->next; + } + + return list; +} + +/** + ** makeipaddr + ** + * utility func to make an ip address + */ + +static PyObject *makeipaddr(struct apr_sockaddr_t *addr) +{ + char *str = NULL; + apr_status_t rc; + PyObject *ret = NULL; + + rc = apr_sockaddr_ip_get(&str, addr); + if 
(rc==APR_SUCCESS) { + ret = MpBytesOrUnicode_FromString(str); + } + else { + PyErr_SetString(PyExc_SystemError,"apr_sockaddr_ip_get failure"); + } + return ret; +} + +/** + ** makesockaddr + ** + * utility func to make a socket address + */ + +PyObject *makesockaddr(struct apr_sockaddr_t *addr) +{ + PyObject *addrobj = makeipaddr(addr); + PyObject *ret = NULL; + + if (addrobj) { + apr_port_t port; + port = addr->port; + ret = Py_BuildValue("Oi", addrobj, port ); + Py_DECREF(addrobj); + } + return ret; +} diff --git a/test/Makefile.in b/test/Makefile.in new file mode 100644 index 0000000..9d51e53 --- /dev/null +++ b/test/Makefile.in @@ -0,0 +1,30 @@ + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + # + # +PYTHON_BIN=@PYTHON_BIN@ + +test: + $(PYTHON_BIN) test.py + +clean: + rm -f *.pyc *.pyo + cd conf && rm -f test.conf + cd htdocs && rm -f *pyc *pyo + rm -rf logs + rm -rf tmp + +distclean: clean + rm -f Makefile testconf.py diff --git a/test/README b/test/README new file mode 100644 index 0000000..31d3d49 --- /dev/null +++ b/test/README @@ -0,0 +1,90 @@ +Running the test suite +====================== + +1) Setting up Apache +-------------------- + +- You must have a valid Apache 2 installation, responding to start and stop +commands. 
The test suite will launch an Apache instance with its own +configuration file, on its own TCP/IP port, so hopefully it won't break +anything on your Apache setup. + +- If you're running the Apache Monitor under Win32 (the tray icon that shows the +state of the Apache Server), you'll notice that it will show the status of the +test server during the tests. Unfortunately once the tests are over the monitor +does not revert back to showing the status of your Apache server. The server +itself is still running, though. + +2) Setting up mod_python source code +------------------------------------ + +- The source code is required because the unit test aren't provided in the +binary distributions. + +- Get it from http://www.modpython.org/ or using SVN from : + http://svn.apache.org/repos/asf/httpd/mod_python/trunk + +- We'll suppose you've extracted or checked it out in a directory +named MOD_PYTHON. + +- Either you run the MOD_PYTHON/configure script... + +- ..or if you're under Win32, then running the configure script +may be too much of a hassle. We advise you to copy +MOD_PYTHON/test/testconf.py.in to MOD_PYTHON/test/testconf.py +and edit this file as described in the comments. + +3) Building and/or installing mod_python binaries +------------------------------------------------- + +- If you're really adventurous, you may want to build mod_python from source. +Then it's either "./configure; make; make install" on Unix, or running +MOD_PYTHON/dist/build_installer.bat on Win32. + +- The best way to ensure that the test suite will run is to install the target +mod_python binaries on your system. The complete list of binaries (including +Win32 ones) can be found following the "Other Binaries" link on : + http://httpd.apache.org/modules/python-download.cgi + +- Running your own version of mod_python without installing it is possible and +useful if you cannot be root, but you're a bit on your own, sorry. 
You'll have
to make sure MOD_PYTHON/testconf.py points to the right version of
mod_python.so and tweak Apache user's PYTHONPATH environment variable so that
mod_python can find its Python modules.

4) Running the test suite
-------------------------

In the current directory, just launch the test.py script :

python test.py

You should see a bunch of text scrolling by as the tests unfold. If everything
goes well, you should see two lines near the end of the output, one with :

8<---8<---8<---8<---8<---
Ran 43 tests in 17.955s

OK
8<---8<---8<---8<---8<---

and the other with :

8<---8<---8<---8<---8<---
Ran 6 tests in 70.942s

OK
8<---8<---8<---8<---8<---

Of course the number of tests will vary in the future, but what's important here
are the two shiny "OK". This means the tests were successful.

5) Report the results
---------------------

Now you can report your success to python-dev@httpd.apache.org ! Of course if
it's not successful and you're sure it's not due to a problem in your setup,
we're also interested. Send us the full output of the test suite, or at least
the stack trace of the failed tests.

Thanks for your time spent running those tests!
\ No newline at end of file
diff --git a/test/conf/mime.types b/test/conf/mime.types
new file mode 100644
index 0000000..6eab0ff
--- /dev/null
+++ b/test/conf/mime.types
@@ -0,0 +1,471 @@
# This is a comment. I love comments.

# This file controls what Internet media types are sent to the client for
# given file extension(s). Sending the correct media type to the client
# is important so they know how to handle the content of the file.
# Extra types can either be added here or by using an AddType directive
# in your config files. For more information about Internet media types,
# please read RFC 2045, 2046, 2047, 2048, and 2077. The Internet media type
# registry is at <ftp://ftp.iana.org/in-notes/iana/assignments/media-types/>.
+ +# MIME type Extension +application/EDI-Consent +application/EDI-X12 +application/EDIFACT +application/activemessage +application/andrew-inset ez +application/applefile +application/atomicmail +application/batch-SMTP +application/beep+xml +application/cals-1840 +application/commonground +application/cybercash +application/dca-rft +application/dec-dx +application/dvcs +application/eshop +application/http +application/hyperstudio +application/iges +application/index +application/index.cmd +application/index.obj +application/index.response +application/index.vnd +application/iotp +application/ipp +application/isup +application/font-tdpfr +application/mac-binhex40 hqx +application/mac-compactpro cpt +application/macwriteii +application/marc +application/mathematica +application/mathematica-old +application/msword doc +application/news-message-id +application/news-transmission +application/ocsp-request +application/ocsp-response +application/octet-stream bin dms lha lzh exe class so dll +application/oda oda +application/parityfec +application/pdf pdf +application/pgp-encrypted +application/pgp-keys +application/pgp-signature +application/pkcs10 +application/pkcs7-mime +application/pkcs7-signature +application/pkix-cert +application/pkix-crl +application/pkixcmp +application/postscript ai eps ps +application/prs.alvestrand.titrax-sheet +application/prs.cww +application/prs.nprend +application/qsig +application/remote-printing +application/riscos +application/rtf +application/sdp +application/set-payment +application/set-payment-initiation +application/set-registration +application/set-registration-initiation +application/sgml +application/sgml-open-catalog +application/sieve +application/slate +application/smil smi smil +application/timestamp-query +application/timestamp-reply +application/vemmi +application/vnd.3M.Post-it-Notes +application/vnd.FloGraphIt +application/vnd.accpac.simply.aso +application/vnd.accpac.simply.imp +application/vnd.acucobol 
+application/vnd.aether.imp +application/vnd.anser-web-certificate-issue-initiation +application/vnd.anser-web-funds-transfer-initiation +application/vnd.audiograph +application/vnd.businessobjects +application/vnd.bmi +application/vnd.canon-cpdl +application/vnd.canon-lips +application/vnd.claymore +application/vnd.commerce-battelle +application/vnd.commonspace +application/vnd.comsocaller +application/vnd.contact.cmsg +application/vnd.cosmocaller +application/vnd.cups-postscript +application/vnd.cups-raster +application/vnd.cups-raw +application/vnd.ctc-posml +application/vnd.cybank +application/vnd.dna +application/vnd.dpgraph +application/vnd.dxr +application/vnd.ecdis-update +application/vnd.ecowin.chart +application/vnd.ecowin.filerequest +application/vnd.ecowin.fileupdate +application/vnd.ecowin.series +application/vnd.ecowin.seriesrequest +application/vnd.ecowin.seriesupdate +application/vnd.enliven +application/vnd.epson.esf +application/vnd.epson.msf +application/vnd.epson.quickanime +application/vnd.epson.salt +application/vnd.epson.ssf +application/vnd.ericsson.quickcall +application/vnd.eudora.data +application/vnd.fdf +application/vnd.ffsns +application/vnd.framemaker +application/vnd.fsc.weblaunch +application/vnd.fujitsu.oasys +application/vnd.fujitsu.oasys2 +application/vnd.fujitsu.oasys3 +application/vnd.fujitsu.oasysgp +application/vnd.fujitsu.oasysprs +application/vnd.fujixerox.ddd +application/vnd.fujixerox.docuworks +application/vnd.fujixerox.docuworks.binder +application/vnd.fut-misnet +application/vnd.grafeq +application/vnd.groove-account +application/vnd.groove-identity-message +application/vnd.groove-injector +application/vnd.groove-tool-message +application/vnd.groove-tool-template +application/vnd.groove-vcard +application/vnd.hhe.lesson-player +application/vnd.hp-HPGL +application/vnd.hp-PCL +application/vnd.hp-PCLXL +application/vnd.hp-hpid +application/vnd.hp-hps +application/vnd.httphone +application/vnd.hzn-3d-crossword 
+application/vnd.ibm.afplinedata +application/vnd.ibm.MiniPay +application/vnd.ibm.modcap +application/vnd.informix-visionary +application/vnd.intercon.formnet +application/vnd.intertrust.digibox +application/vnd.intertrust.nncp +application/vnd.intu.qbo +application/vnd.intu.qfx +application/vnd.irepository.package+xml +application/vnd.is-xpr +application/vnd.japannet-directory-service +application/vnd.japannet-jpnstore-wakeup +application/vnd.japannet-payment-wakeup +application/vnd.japannet-registration +application/vnd.japannet-registration-wakeup +application/vnd.japannet-setstore-wakeup +application/vnd.japannet-verification +application/vnd.japannet-verification-wakeup +application/vnd.koan +application/vnd.lotus-1-2-3 +application/vnd.lotus-approach +application/vnd.lotus-freelance +application/vnd.lotus-notes +application/vnd.lotus-organizer +application/vnd.lotus-screencam +application/vnd.lotus-wordpro +application/vnd.mcd +application/vnd.mediastation.cdkey +application/vnd.meridian-slingshot +application/vnd.mif mif +application/vnd.minisoft-hp3000-save +application/vnd.mitsubishi.misty-guard.trustweb +application/vnd.mobius.daf +application/vnd.mobius.dis +application/vnd.mobius.msl +application/vnd.mobius.plc +application/vnd.mobius.txf +application/vnd.motorola.flexsuite +application/vnd.motorola.flexsuite.adsi +application/vnd.motorola.flexsuite.fis +application/vnd.motorola.flexsuite.gotap +application/vnd.motorola.flexsuite.kmr +application/vnd.motorola.flexsuite.ttc +application/vnd.motorola.flexsuite.wem +application/vnd.mozilla.xul+xml +application/vnd.ms-artgalry +application/vnd.ms-asf +application/vnd.ms-excel xls +application/vnd.ms-lrm +application/vnd.ms-powerpoint ppt +application/vnd.ms-project +application/vnd.ms-tnef +application/vnd.ms-works +application/vnd.mseq +application/vnd.msign +application/vnd.music-niff +application/vnd.musician +application/vnd.netfpx +application/vnd.noblenet-directory +application/vnd.noblenet-sealer 
+application/vnd.noblenet-web +application/vnd.novadigm.EDM +application/vnd.novadigm.EDX +application/vnd.novadigm.EXT +application/vnd.osa.netdeploy +application/vnd.palm +application/vnd.pg.format +application/vnd.pg.osasli +application/vnd.powerbuilder6 +application/vnd.powerbuilder6-s +application/vnd.powerbuilder7 +application/vnd.powerbuilder7-s +application/vnd.powerbuilder75 +application/vnd.powerbuilder75-s +application/vnd.previewsystems.box +application/vnd.publishare-delta-tree +application/vnd.pvi.ptid1 +application/vnd.pwg-xhtml-print+xml +application/vnd.rapid +application/vnd.s3sms +application/vnd.seemail +application/vnd.shana.informed.formdata +application/vnd.shana.informed.formtemplate +application/vnd.shana.informed.interchange +application/vnd.shana.informed.package +application/vnd.sss-cod +application/vnd.sss-dtf +application/vnd.sss-ntf +application/vnd.street-stream +application/vnd.svd +application/vnd.swiftview-ics +application/vnd.triscape.mxs +application/vnd.trueapp +application/vnd.truedoc +application/vnd.tve-trigger +application/vnd.ufdl +application/vnd.uplanet.alert +application/vnd.uplanet.alert-wbxml +application/vnd.uplanet.bearer-choice-wbxml +application/vnd.uplanet.bearer-choice +application/vnd.uplanet.cacheop +application/vnd.uplanet.cacheop-wbxml +application/vnd.uplanet.channel +application/vnd.uplanet.channel-wbxml +application/vnd.uplanet.list +application/vnd.uplanet.list-wbxml +application/vnd.uplanet.listcmd +application/vnd.uplanet.listcmd-wbxml +application/vnd.uplanet.signal +application/vnd.vcx +application/vnd.vectorworks +application/vnd.vidsoft.vidconference +application/vnd.visio +application/vnd.vividence.scriptfile +application/vnd.wap.sic +application/vnd.wap.slc +application/vnd.wap.wbxml wbxml +application/vnd.wap.wmlc wmlc +application/vnd.wap.wmlscriptc wmlsc +application/vnd.webturbo +application/vnd.wrq-hp3000-labelled +application/vnd.wt.stf +application/vnd.xara +application/vnd.xfdl 
+application/vnd.yellowriver-custom-menu +application/whoispp-query +application/whoispp-response +application/wita +application/wordperfect5.1 +application/x-bcpio bcpio +application/x-cdlink vcd +application/x-chess-pgn pgn +application/x-compress +application/x-cpio cpio +application/x-csh csh +application/x-director dcr dir dxr +application/x-dvi dvi +application/x-futuresplash spl +application/x-gtar gtar +application/x-gzip +application/x-hdf hdf +application/x-javascript js +application/x-koan skp skd skt skm +application/x-latex latex +application/x-netcdf nc cdf +application/x-sh sh +application/x-shar shar +application/x-shockwave-flash swf +application/x-stuffit sit +application/x-sv4cpio sv4cpio +application/x-sv4crc sv4crc +application/x-tar tar +application/x-tcl tcl +application/x-tex tex +application/x-texinfo texinfo texi +application/x-troff t tr roff +application/x-troff-man man +application/x-troff-me me +application/x-troff-ms ms +application/x-ustar ustar +application/x-wais-source src +application/x400-bp +application/xhtml+xml xhtml xht +application/xml +application/xml-dtd +application/xml-external-parsed-entity +application/zip zip +audio/32kadpcm +audio/basic au snd +audio/g.722.1 +audio/l16 +audio/midi mid midi kar +audio/mp4a-latm +audio/mpa-robust +audio/mpeg mpga mp2 mp3 +audio/parityfec +audio/prs.sid +audio/telephone-event +audio/tone +audio/vnd.cisco.nse +audio/vnd.cns.anp1 +audio/vnd.cns.inf1 +audio/vnd.digital-winds +audio/vnd.everad.plj +audio/vnd.lucent.voice +audio/vnd.nortel.vbk +audio/vnd.nuera.ecelp4800 +audio/vnd.nuera.ecelp7470 +audio/vnd.nuera.ecelp9600 +audio/vnd.octel.sbc +audio/vnd.qcelp +audio/vnd.rhetorex.32kadpcm +audio/vnd.vmx.cvsd +audio/x-aiff aif aiff aifc +audio/x-mpegurl m3u +audio/x-pn-realaudio ram rm +audio/x-pn-realaudio-plugin rpm +audio/x-realaudio ra +audio/x-wav wav +chemical/x-pdb pdb +chemical/x-xyz xyz +image/bmp bmp +image/cgm +image/g3fax +image/gif gif +image/ief ief +image/jpeg jpeg jpg jpe 
+image/naplps +image/png png +image/prs.btif +image/prs.pti +image/tiff tiff tif +image/vnd.cns.inf2 +image/vnd.djvu djvu djv +image/vnd.dwg +image/vnd.dxf +image/vnd.fastbidsheet +image/vnd.fpx +image/vnd.fst +image/vnd.fujixerox.edmics-mmr +image/vnd.fujixerox.edmics-rlc +image/vnd.mix +image/vnd.net-fpx +image/vnd.svf +image/vnd.wap.wbmp wbmp +image/vnd.xiff +image/x-cmu-raster ras +image/x-portable-anymap pnm +image/x-portable-bitmap pbm +image/x-portable-graymap pgm +image/x-portable-pixmap ppm +image/x-rgb rgb +image/x-xbitmap xbm +image/x-xpixmap xpm +image/x-xwindowdump xwd +message/delivery-status +message/disposition-notification +message/external-body +message/http +message/news +message/partial +message/rfc822 +message/s-http +model/iges igs iges +model/mesh msh mesh silo +model/vnd.dwf +model/vnd.flatland.3dml +model/vnd.gdl +model/vnd.gs-gdl +model/vnd.gtw +model/vnd.mts +model/vnd.vtu +model/vrml wrl vrml +multipart/alternative +multipart/appledouble +multipart/byteranges +multipart/digest +multipart/encrypted +multipart/form-data +multipart/header-set +multipart/mixed +multipart/parallel +multipart/related +multipart/report +multipart/signed +multipart/voice-message +text/calendar +text/css css +text/directory +text/enriched +text/html html htm +text/parityfec +text/plain asc txt +text/prs.lines.tag +text/rfc822-headers +text/richtext rtx +text/rtf rtf +text/sgml sgml sgm +text/tab-separated-values tsv +text/t140 +text/uri-list +text/vnd.DMClientScript +text/vnd.IPTC.NITF +text/vnd.IPTC.NewsML +text/vnd.abc +text/vnd.curl +text/vnd.flatland.3dml +text/vnd.fly +text/vnd.fmi.flexstor +text/vnd.in3d.3dml +text/vnd.in3d.spot +text/vnd.latex-z +text/vnd.motorola.reflex +text/vnd.ms-mediapackage +text/vnd.wap.si +text/vnd.wap.sl +text/vnd.wap.wml wml +text/vnd.wap.wmlscript wmls +text/x-setext etx +text/xml xml xsl +text/xml-external-parsed-entity +video/mp4v-es +video/mpeg mpeg mpg mpe +video/parityfec +video/pointer +video/quicktime qt mov 
+video/vnd.fvt +video/vnd.motorola.video +video/vnd.motorola.videop +video/vnd.mpegurl mxu +video/vnd.mts +video/vnd.nokia.interleaved-multimedia +video/vnd.vivo +video/x-msvideo avi +video/x-sgi-movie movie +x-conference/x-cooltalk ice diff --git a/test/htdocs/cgitest.py b/test/htdocs/cgitest.py new file mode 100644 index 0000000..b5c5372 --- /dev/null +++ b/test/htdocs/cgitest.py @@ -0,0 +1,6 @@ + +from __future__ import print_function + +print("Content-type: text/plain\n") +print("test ok") + diff --git a/test/htdocs/dummymodule.py b/test/htdocs/dummymodule.py new file mode 100644 index 0000000..94cea11 --- /dev/null +++ b/test/htdocs/dummymodule.py @@ -0,0 +1,7 @@ +from mod_python import apache + +apache.log_error("dummymodule / %s" % apache.interpreter) + +def function(): + apache.log_error("dummymodule::function / %s" % apache.interpreter) + apache.main_server.get_options()["dummymodule::function"] = "1" diff --git a/test/htdocs/index.py b/test/htdocs/index.py new file mode 100644 index 0000000..ee0358c --- /dev/null +++ b/test/htdocs/index.py @@ -0,0 +1,24 @@ + # + # + # Licensed under the Apache License, Version 2.0 (the "License"); you + # may not use this file except in compliance with the License. You + # may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + # implied. See the License for the specific language governing + # permissions and limitations under the License. 
+ # + # + # + +# mod_python tests + +def index(req): + return "test 1 ok, interpreter=%s" % req.interpreter + +def foobar(req): + return "test 2 ok, interpreter=%s" % req.interpreter diff --git a/test/htdocs/psp_parser.psp b/test/htdocs/psp_parser.psp new file mode 100644 index 0000000..b09f818 --- /dev/null +++ b/test/htdocs/psp_parser.psp @@ -0,0 +1,35 @@ +# format of this file +# Comment lines are ignored (python or psp). +# Lines beginning with test will be included, which +# includes lines generated by python code. +# Blank lines are ignored. +# +# The test format is "test:expected_result:test_string$" +# where the string '$' is a token to indicate the end of the test line. +# The '$' character was chosen as it's unlikely to be in the output of +# the parser. +# In processing the expected_result, the '-' character will be replaced +# by the '\' character. +# The following substitutions will also be made in the expected_result +# LF linefeed character +# CR carriage-return character +# TB tab character + + +# BEGIN$ +test:-n:\n$ +test:-r:\r$ +test:-t:\t$ +test:-r-n:\r\n$ +<% +req.write("test:-n:\\n$") +%> + +<% +test_str='single_quotes' +%> +test:'single_quotes':'<%= test_str %>'$ +<% +test_str='double_quotes' +%> +test:"double_quotes":"<%= test_str %>"$ diff --git a/test/htdocs/psptest.psp b/test/htdocs/psptest.psp new file mode 100644 index 0000000..649d384 --- /dev/null +++ b/test/htdocs/psptest.psp @@ -0,0 +1,4 @@ +<% +if 1: + req.write("t") +%>est<%=" "+"ok"%> diff --git a/test/htdocs/psptest_fail.psp b/test/htdocs/psptest_fail.psp new file mode 100644 index 0000000..80e7b7c --- /dev/null +++ b/test/htdocs/psptest_fail.psp @@ -0,0 +1,4 @@ +fail +<% +session['dummy'] = 1 +%> diff --git a/test/htdocs/psptest_main.psp b/test/htdocs/psptest_main.psp new file mode 100644 index 0000000..5813e07 --- /dev/null +++ b/test/htdocs/psptest_main.psp @@ -0,0 +1,6 @@ +okay +<% +psp.set_error_page('psptest_fail.psp') +session['dummy'] = 1 +raise 'fail' +%> diff --git 
a/test/htdocs/ssi.shtml b/test/htdocs/ssi.shtml new file mode 100644 index 0000000..f3cbc60 --- /dev/null +++ b/test/htdocs/ssi.shtml @@ -0,0 +1,4 @@ + diff --git a/test/htdocs/subdir/dummy.txt b/test/htdocs/subdir/dummy.txt new file mode 100644 index 0000000..e69de29 diff --git a/test/htdocs/tests.py b/test/htdocs/tests.py new file mode 100644 index 0000000..c44d61d --- /dev/null +++ b/test/htdocs/tests.py @@ -0,0 +1,1565 @@ + # + # + # Licensed under the Apache License, Version 2.0 (the "License"); you + # may not use this file except in compliance with the License. You + # may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + # implied. See the License for the specific language governing + # permissions and limitations under the License. + # + # + # + +# mod_python tests + +from mod_python.python22 import * + +from mod_python import apache +import sys +import unittest +import re +import time +import os +import io + +PY2 = sys.version[0] == '2' + +# This is used for mod_python.publisher security tests +_SECRET_PASSWORD = 'root' +__ANSWER = 42 + +class SimpleTestCase(unittest.TestCase): + + def __init__(self, methodName, req): + unittest.TestCase.__init__(self, methodName) + self.req = req + + def test_apache_log_error(self): + + s = self.req.server + c = self.req.connection + + apache.log_error("Testing apache.log_error():", apache.APLOG_INFO, s) + + apache.log_error("xEMERGx", apache.APLOG_EMERG, s) + apache.log_error("xALERTx", apache.APLOG_ALERT, s) + apache.log_error("xCRITx", apache.APLOG_CRIT, s) + apache.log_error("xERRx", apache.APLOG_ERR, s) + apache.log_error("xWARNINGx", apache.APLOG_WARNING, s) + apache.log_error("xNOTICEx", apache.APLOG_NOTICE, s) + apache.log_error("xINFOx", apache.APLOG_INFO, s) + 
apache.log_error("xDEBUGx", apache.APLOG_DEBUG, s) + + s.log_error("xEMERGx", apache.APLOG_EMERG) + s.log_error("xALERTx", apache.APLOG_ALERT) + s.log_error("xCRITx", apache.APLOG_CRIT) + s.log_error("xERRx", apache.APLOG_ERR) + s.log_error("xWARNINGx", apache.APLOG_WARNING) + s.log_error("xNOTICEx", apache.APLOG_NOTICE) + s.log_error("xINFOx", apache.APLOG_INFO) + s.log_error("xDEBUGx", apache.APLOG_DEBUG) + + c.log_error("xEMERGx", apache.APLOG_EMERG) + c.log_error("xALERTx", apache.APLOG_ALERT) + c.log_error("xCRITx", apache.APLOG_CRIT) + c.log_error("xERRx", apache.APLOG_ERR) + c.log_error("xWARNINGx", apache.APLOG_WARNING) + c.log_error("xNOTICEx", apache.APLOG_NOTICE) + c.log_error("xINFOx", apache.APLOG_INFO) + c.log_error("xDEBUGx", apache.APLOG_DEBUG) + + # see what's in the log now + f = open("%s/logs/error_log" % apache.server_root()) + # for some reason re doesn't like \n, why? + log = "".join(map(str.strip, f.readlines())) + f.close() + + if not re.search("xEMERGx.*xALERTx.*xCRITx.*xERRx.*xWARNINGx.*xNOTICEx.*xINFOx.*xDEBUGx.*xEMERGx.*xALERTx.*xCRITx.*xERRx.*xWARNINGx.*xNOTICEx.*xINFOx.*xDEBUGx.*xEMERGx.*xALERTx.*xCRITx.*xERRx.*xWARNINGx.*xNOTICEx.*xINFOx.*xDEBUGx", log): + self.fail("Could not find test messages in error_log") + + + def test_apache_table(self): + + log = self.req.log_error + + log("Testing table object.") + + # tests borrowed from Python test suite for dict + _test_table() + + # inheritance + log(" inheritance") + class mytable(apache.table): + def __str__(self): + return "str() from mytable" + mt = mytable({'a':'b'}) + + # add() + log(" table.add()") + a = apache.table({'a':'b'}) + a.add('a', 'c') + if a['a'] != ['b', 'c']: + self.fail('table.add() broken: a["a"] is %s' % repr(a["a"])) + + log("Table test DONE.") + + def test_req_add_common_vars(self): + + self.req.log_error("Testing req.add_common_vars().") + + a = len(self.req.subprocess_env) + self.req.add_common_vars() + b = len(self.req.subprocess_env) + if a >= b: + 
self.fail("req.subprocess_env() is same size before and after") + + def test_req_add_cgi_vars(self): + + self.req.log_error("Testing req.add_cgi_vars().") + + a = len(self.req.subprocess_env) + self.req.add_cgi_vars() + b = len(self.req.subprocess_env) + if a >= b: + self.fail("req.subprocess_env() is same size before and after") + + def test_req_members(self): + + # just run through request members making sure + # they make sense + + req = self.req + log = req.log_error + + log("Examining request memebers:") + + log(" req.connection: %s" % repr(req.connection)) + s = str(type(req.connection)) + if s not in ("", ""): + self.fail("strange req.connection type %s" % repr(s)) + + log(" req.server: '%s'" % repr(req.server)) + s = str(type(req.server)) + if s not in ("", ""): + self.fail("strange req.server type %s" % repr(s)) + + for x in ((req.next, "next"), + (req.prev, "prev"), + (req.main, "main")): + val, name = x + log(" req.%s: '%s'" % (name, repr(val))) + if val: + self.fail("strange, req.%s should be None, not %s" % (name, repr(val))) + + log(" req.the_request: '%s'" % req.the_request) + if not re.match(r"GET /.* HTTP/1\.", req.the_request): + self.fail("strange req.the_request %s" % repr(req.the_request)) + + for x in ((req.assbackwards, "assbackwards"), + (req.proxyreq, "proxyreq"), + (req.header_only, "header_only")): + val, name = x + log(" req.%s: %s" % (name, repr(val))) + if val: + self.fail("%s should be 0" % name) + + log(" req.protocol: %s" % repr(req.protocol)) + if not req.protocol == req.the_request.split()[-1]: + self.fail("req.protocol doesn't match req.the_request") + + log(" req.proto_num: %s" % repr(req.proto_num)) + if req.proto_num != 1000 + int(req.protocol[-1]): + self.fail("req.proto_num doesn't match req.protocol") + + log(" req.hostname: %s" % repr(req.hostname)) + if req.hostname != "test_internal": + self.fail("req.hostname isn't 'test_internal'") + + log(" req.request_time: %s" % repr(req.request_time)) + if (time.time() - 
req.request_time) > 10: + self.fail("req.request_time suggests request started more than 10 secs ago") + + log(" req.status_line: %s" % repr(req.status_line)) + if req.status_line: + self.fail("req.status_line should be None at this point") + + log(" req.status: %s" % repr(req.status)) + if req.status != 200: + self.fail("req.status should be 200") + req.status = req.status # make sure its writable + + log(" req.method: %s" % repr(req.method)) + if req.method != "GET": + self.fail("req.method should be 'GET'") + + log(" req.method_number: %s" % repr(req.method_number)) + if req.method_number != 0: + self.fail("req.method_number should be 0") + + log(" req.allowed: %s" % repr(req.allowed)) + if req.allowed != 0: + self.fail("req.allowed should be 0") + + log(" req.allowed_xmethods: %s" % repr(req.allowed_xmethods)) + if req.allowed_xmethods != (): + self.fail("req.allowed_xmethods should be an empty tuple") + + log(" req.allowed_methods: %s" % repr(req.allowed_methods)) + if req.allowed_methods != (): + self.fail("req.allowed_methods should be an empty tuple") + + log(" req.sent_bodyct: %s" % repr(req.sent_bodyct)) + if req.sent_bodyct != 0: + self.fail("req.sent_bodyct should be 0") + + log(" req.bytes_sent: %s" % repr(req.bytes_sent)) + save = req.bytes_sent + log(" writing 4 bytes...") + req.write("1234") + log(" req.bytes_sent: %s" % repr(req.bytes_sent)) + if req.bytes_sent - save != 4: + self.fail("req.bytes_sent should have incremented by 4, but didn't") + + log(" req.mtime: %s" % repr(req.mtime)) + if req.mtime != 0: + self.fail("req.mtime should be 0") + + log(" req.chunked: %s" % repr(req.chunked)) + if req.chunked != 1: + self.fail("req.chunked should be 1") + + log(" req.range: %s" % repr(req.range)) + if req.range: + self.fail("req.range should be None") + + log(" req.clength: %s" % repr(req.clength)) + log(" calling req.set_content_length(15)...") + req.set_content_length(15) + log(" req.clength: %s" % repr(req.clength)) + if req.clength != 15: + 
self.fail("req.clength should be 15") + + log(" req.remaining: %s" % repr(req.remaining)) + if req.remaining != 0: + self.fail("req.remaining should be 0") + + log(" req.read_length: %s" % repr(req.read_length)) + if req.read_length != 0: + self.fail("req.read_length should be 0") + + log(" req.read_body: %s" % repr(req.read_body)) + if req.read_body != 0: + self.fail("req.read_body should be 0") + + log(" req.read_chunked: %s" % repr(req.read_chunked)) + if req.read_chunked != 0: + self.fail("req.read_chunked should be 0") + + log(" req.expecting_100: %s" % repr(req.expecting_100)) + if req.expecting_100 != 0: + self.fail("req.expecting_100 should be 0") + + log(" req.headers_in: %s" % repr(req.headers_in)) + if req.headers_in["Host"][:13].lower() != "test_internal": + self.fail("The 'Host' header should begin with 'test_internal'") + + log(" req.headers_out: %s" % repr(req.headers_out)) + if (("content-length" not in req.headers_out) or + req.headers_out["content-length"] != "15"): + self.fail("req.headers_out['content-length'] should be 15") + + log(" req.subprocess_env: %s" % repr(req.subprocess_env)) + if req.subprocess_env["SERVER_SOFTWARE"].find("Python") == -1: + self.fail("req.subprocess_env['SERVER_SOFTWARE'] should contain 'Python'") + + log(" req.notes: %s" % repr(req.notes)) + log(" doing req.notes['testing'] = '123' ...") + req.notes['testing'] = '123' + log(" req.notes: %s" % repr(req.notes)) + if req.notes["testing"] != '123': + self.fail("req.notes['testing'] should be '123'") + + log(" req.phase: %s" % repr(req.phase)) + if req.phase != "PythonHandler": + self.fail("req.phase should be 'PythonHandler'") + + log(" req.interpreter: %s" % repr(req.interpreter)) + if req.interpreter != apache.interpreter: + self.fail("req.interpreter should be same as apache.interpreter" % repr(apache.interpreter)) + if req.interpreter != req.server.server_hostname: + self.fail("req.interpreter should be same as req.server.server_hostname: %s" % 
repr(req.server.server_hostname)) + + log(" req.content_type: %s" % repr(req.content_type)) + log(" doing req.content_type = 'test/123' ...") + req.content_type = 'test/123' + log(" req.content_type: %s" % repr(req.content_type)) + if req.content_type != 'test/123' or not req._content_type_set: + self.fail("req.content_type should be 'test/123' and req._content_type_set 1") + + log(" req.handler: %s" % repr(req.handler)) + if req.handler != "mod_python": + self.fail("req.handler should be 'mod_python'") + + log(" req.content_encoding: %s" % repr(req.content_encoding)) + if req.content_encoding: + self.fail("req.content_encoding should be None") + + log(" req.content_languages: %s" % repr(req.content_languages)) + if req.content_languages != (): + self.fail("req.content_languages should be an empty tuple") + + log(" req.vlist_validator: %s" % repr(req.vlist_validator)) + if req.vlist_validator: + self.fail("req.vlist_validator should be None") + + log(" req.user: %s" % repr(req.user)) + if req.user: + self.fail("req.user should be None") + + log(" req.ap_auth_type: %s" % repr(req.ap_auth_type)) + if req.ap_auth_type: + self.fail("req.ap_auth_type should be None") + + log(" req.no_cache: %s" % repr(req.no_cache)) + if req.no_cache != 0: + self.fail("req.no_cache should be 0") + + log(" req.no_local_copy: %s" % repr(req.no_local_copy)) + if req.no_local_copy != 0: + self.fail("req.no_local_copy should be 0") + + log(" req.unparsed_uri: %s" % repr(req.unparsed_uri)) + if req.unparsed_uri != "/tests.py": + self.fail("req.unparsed_uri should be '/tests.py'") + + log(" req.uri: %s" % repr(req.uri)) + if req.uri != "/tests.py": + self.fail("req.uri should be '/tests.py'") + + log(" req.filename: %s" % repr(req.filename)) + if req.filename != req.document_root() + req.uri: + self.fail("req.filename should be req.document_root() + req.uri, but it isn't") + + log(" req.canonical_filename: %s" % repr(req.canonical_filename)) + if not req.canonical_filename: + 
self.fail("req.canonical_filename should not be blank") + + log(" req.path_info: %s" % repr(req.path_info)) + if req.path_info != '': + self.fail("req.path_info should be ''") + + log(" req.args: %s" % repr(req.args)) + if req.args: + self.fail("req.args should be None") + + log(" req.finfo: %s" % repr(req.finfo)) + if req.finfo[apache.FINFO_FNAME] and (req.finfo[apache.FINFO_FNAME] != req.canonical_filename): + self.fail("req.finfo[apache.FINFO_FNAME] should be the (canonical) filename") + + log(" req.parsed_uri: %s" % repr(req.parsed_uri)) + if req.parsed_uri[apache.URI_PATH] != '/tests.py': + self.fail("req.parsed_uri[apache.URI_PATH] should be '/tests.py'") + + log(" req.used_path_info: %s" % repr(req.used_path_info)) + if req.used_path_info != 2: + self.fail("req.used_path_info should be 2") # XXX really? :-) + + log(" req.eos_sent: %s" % repr(req.eos_sent)) + if req.eos_sent: + self.fail("req.eos_sent says we sent EOS, but we didn't") + + if apache.MODULE_MAGIC_NUMBER_MAJOR > 20111130: + + try: + import socket + localip = socket.gethostbyname("localhost") + except: + localip = "127.0.0.1" + + log(" req.useragent_ip: %s" % repr(req.useragent_ip)) + if not req.useragent_ip in ("127.0.0.1", localip): + self.fail("req.useragent_ip should be '127.0.0.1'") + + log(" req.useragent_addr: %s" % repr(req.useragent_addr)) + if not req.useragent_addr[0] in ("127.0.0.1", "0.0.0.0", localip): + self.fail("req.useragent_addr[0] should be '127.0.0.1' or '0.0.0.0'") + + + def test_req_get_config(self): + + req = self.req + log = req.log_error + + log("req.get_config(): %s" % repr(req.get_config())) + if req.get_config()["PythonDebug"] != "1": + self.fail("get_config return should show PythonDebug 1") + + log("req.get_options(): %s" % repr(req.get_options())) + if req.get_options() != apache.table({"testing":"123"}): + self.fail("get_options() should contain 'testing':'123', contains %s"%list(req.get_options().items())) + + def test_req_get_remote_host(self): + + # simulating 
this test for real is too complex... + req = self.req + log = req.log_error + log("req.get_get_remote_host(): %s" % repr(req.get_remote_host(apache.REMOTE_HOST))) + log("req.get_get_remote_host(): %s" % repr(req.get_remote_host())) + if (req.get_remote_host(apache.REMOTE_HOST) != None) or \ + (req.get_remote_host() != "127.0.0.1"): + self.fail("remote host test failed: %s" % req.get_remote_host()) + + def test_server_members(self): + + req = self.req + log = req.log_error + server = req.server + + log("Examining server memebers:") + + log(" server.defn_name: %s" % repr(server.defn_name)) + if server.defn_name[-9:] != "test.conf": + self.fail("server.defn_name does not end in 'test.conf'") + + log(" server.defn_line_number: %s" % repr(server.defn_line_number)) + if server.defn_line_number == 0: + self.fail("server.defn_line_number should not be 0") + + log(" server.server_admin: %s" % repr(server.server_admin)) + if server.server_admin != "serveradmin@somewhere.com": + self.fail("server.server_admin must be 'serveradmin@somewhere.com'") + + log(" server.server_hostname: %s" % repr(server.server_hostname)) + if server.server_hostname != "test_internal": + self.fail("server.server_hostname must be 'test_internal'") + + log(" server.port: %s" % repr(server.port)) + # hmm it really is 0... 
+ #if server.port == 0: + # self.fail("server.port should not be 0") + + log(" server.error_fname: %s" % repr(server.error_fname)) + if server.error_fname != "logs/error_log": + self.fail("server.error_fname should be 'logs/error_log'") + + log(" server.loglevel: %s" % repr(server.loglevel)) + if server.loglevel != 7: + self.fail("server.loglevel should be 7") + + log(" server.is_virtual: %s" % repr(server.is_virtual)) + if server.is_virtual != 1: + self.fail("server.is_virtual should be 1") + + log(" server.timeout: %s" % repr(server.timeout)) + if not server.timeout in (5.0, 60.0): + self.fail("server.timeout should be 5.0 or 60.0") + + log(" server.keep_alive_timeout: %s" % repr(server.keep_alive_timeout)) + if server.keep_alive_timeout != 15.0: + self.fail("server.keep_alive_timeout should be 15.0") + + log(" server.keep_alive_max: %s" % repr(server.keep_alive_max)) + if server.keep_alive_max != 100: + self.fail("server.keep_alive_max should be 100") + + log(" server.keep_alive: %s" % repr(server.keep_alive)) + if server.keep_alive != 1: + self.fail("server.keep_alive should be 1") + + log(" server.path: %s" % repr(server.path)) + if server.path != "some/path": + self.fail("server.path should be 'some/path'") + + log(" server.pathlen: %s" % repr(server.pathlen)) + if server.pathlen != len('some/path'): + self.fail("server.pathlen should be %d" % len('some/path')) + + log(" server.limit_req_line: %s" % repr(server.limit_req_line)) + if server.limit_req_line != 8190: + self.fail("server.limit_req_line should be 8190") + + log(" server.limit_req_fieldsize: %s" % repr(server.limit_req_fieldsize)) + if server.limit_req_fieldsize != 8190: + self.fail("server.limit_req_fieldsize should be 8190") + + log(" server.limit_req_fields: %s" % repr(server.limit_req_fields)) + if server.limit_req_fields != 100: + self.fail("server.limit_req_fields should be 100") + + log(" server.names: %s" % repr(server.names)) + if server.names != (): + self.fail("server.names should be an 
empty tuple") + + log(" server.wild_names: %s" % repr(server.wild_names)) + if server.wild_names != (): + self.fail("server.wild_names should be an empty tuple") + + + def test_connection_members(self): + + req = self.req + log = req.log_error + conn = req.connection + + try: + import socket + localip = socket.gethostbyname("localhost") + except: + localip = "127.0.0.1" + + log("Examining connection memebers:") + + log(" connection.base_server: %s" % repr(conn.base_server)) + if type(conn.base_server) is not type(req.server): + self.fail("conn.base_server should be same type as req.server") + + log(" connection.local_addr: %s" % repr(conn.local_addr)) + if not conn.local_addr[0] in ("127.0.0.1", "0.0.0.0", localip): + self.fail("conn.local_addr[0] should be '127.0.0.1' or '0.0.0.0'") + + if apache.MODULE_MAGIC_NUMBER_MAJOR > 20111130: + + log(" connection.client_addr: %s" % repr(conn.client_addr)) + if not conn.client_addr[0] in ("127.0.0.1", "0.0.0.0", localip): + self.fail("conn.client_addr[0] should be '127.0.0.1' or '0.0.0.0'") + + log(" connection.client_ip: %s" % repr(conn.client_ip)) + if not conn.client_ip in ("127.0.0.1", localip): + self.fail("conn.client_ip should be '127.0.0.1'") + + else: + + log(" connection.remote_addr: %s" % repr(conn.remote_addr)) + if not conn.remote_addr[0] in ("127.0.0.1", "0.0.0.0", localip): + self.fail("conn.remote_addr[0] should be '127.0.0.1' or '0.0.0.0'") + + log(" connection.remote_ip: %s" % repr(conn.remote_ip)) + if not conn.remote_ip in ("127.0.0.1", localip): + self.fail("conn.remote_ip should be '127.0.0.1'") + + log(" connection.remote_host: %s" % repr(conn.remote_host)) + if conn.remote_host is not None: + self.fail("conn.remote_host should be None") + + log(" connection.remote_logname: %s" % repr(conn.remote_logname)) + if conn.remote_logname is not None: + self.fail("conn.remote_logname should be None") + + log(" connection.aborted: %s" % repr(conn.aborted)) + if conn.aborted != 0: + self.fail("conn.aborted 
should be 0") + + log(" connection.keepalive: %s" % repr(conn.keepalive)) + if conn.keepalive != 2: + self.fail("conn.keepalive should be 2") + + log(" connection.double_reverse: %s" % repr(conn.double_reverse)) + if conn.double_reverse != 0: + self.fail("conn.double_reverse should be 0") + + log(" connection.keepalives: %s" % repr(conn.keepalives)) + if conn.keepalives != 1: + self.fail("conn.keepalives should be 1") + + log(" connection.local_ip: %s" % repr(conn.local_ip)) + if not conn.local_ip in ("127.0.0.1", localip): + self.fail("conn.local_ip should be '127.0.0.1'") + + log(" connection.local_host: %s" % repr(conn.local_host)) + if conn.local_host is not None: + self.fail("conn.local_host should be None") + + log(" connection.id: %s" % repr(conn.id)) + if conn.id > 10000: + self.fail("conn.id probably should not be this high?") + + log(" connection.notes: %s" % repr(conn.notes)) + if repr(conn.notes) != '{}': + self.fail("conn.notes should be {}") + +def make_suite(req): + + mpTestSuite = unittest.TestSuite() + mpTestSuite.addTest(SimpleTestCase("test_apache_log_error", req)) + mpTestSuite.addTest(SimpleTestCase("test_apache_table", req)) + # NB: add_common_vars must be before cgi_vars + mpTestSuite.addTest(SimpleTestCase("test_req_add_common_vars", req)) + mpTestSuite.addTest(SimpleTestCase("test_req_add_cgi_vars", req)) + mpTestSuite.addTest(SimpleTestCase("test_req_members", req)) + mpTestSuite.addTest(SimpleTestCase("test_req_get_config", req)) + mpTestSuite.addTest(SimpleTestCase("test_req_get_remote_host", req)) + mpTestSuite.addTest(SimpleTestCase("test_server_members", req)) + mpTestSuite.addTest(SimpleTestCase("test_connection_members", req)) + return mpTestSuite + + +def handler(req): + + if PY2: + out = io.BytesIO() + else: + out = io.StringIO() + + tr = unittest.TextTestRunner(out) + result = tr.run(make_suite(req)) + + req.log_error(out.getvalue()) + + if result.wasSuccessful(): + req.write("test ok") + else: + req.write("test failed") + + 
return apache.OK + +def req_add_handler(req): + + req.secret_message = "foo" + req.add_handler("PythonHandler", "tests::simple_handler") + + return apache.OK + +def simple_handler(req): + # for req_add_handler() + if (req.secret_message == "foo"): + req.write("test ok") + + return apache.OK + +def req_add_bad_handler(req): + # bad_handler does not exist so adding it + # should raise an AttributeError exception + + req.log_error("req_add_bad_handler " + req.hlist.handler) + req.add_handler("PythonHandler", "tests::bad_handler") + req.log_error("req_add_bad_handler " + req.hlist.handler) + req.write("test ok") + + return apache.OK + +def req_add_empty_handler_string(req): + # Adding an empty string as a handler + # should raise an exception + + req.log_error("req_add_empty_handler_string") + req.add_handler("PythonHandler", "") + req.write("no exception") + + return apache.OK + +def req_add_handler_empty_phase(req): + req.log_error("req_add_handler_empty_phase") + req.log_error("phase=%s" % req.phase) + req.log_error("interpreter=%s" % req.interpreter) + req.log_error("directory=%s" % req.hlist.directory) + if req.phase != "PythonHandler": + directory = os.path.dirname(__file__) + req.add_handler("PythonHandler", "tests::req_add_handler_empty_phase", directory) + else: + req.write("test ok") + + return apache.OK + +def accesshandler_add_handler_to_empty_hl(req): + # Prior to version 3.2.6, adding a python handler + # to an empty handler list would cause a segfault + + req.secret_message = "foo" + req.log_error("accesshandler_add_handler_to_empty_hl") + req.add_handler("PythonHandler", "tests::simple_handler") + + return apache.OK + +def test_req_add_handler_directory(req): + # dir1 will not have a trailing slash and on Win32 + # will use back slashes and not forward slashes. 
+ dir1 = os.path.dirname(__file__) + if req.phase == "PythonFixupHandler": + req.add_handler("PythonHandler", "tests::test_req_add_handler_directory", dir1) + else: + # dir2 should only use forward slashes and + # should have a trailing forward slash added by + # call to req.add_handler(). When dir1 and dir2 + # are normalised for current operating system, + # they should be equivalent. + dir2 = req.hlist.directory + if dir2[-1] != '/' or dir2.count('\\') != 0: + req.write('test failed') + else: + dir1 = os.path.normpath(dir1) + dir2 = os.path.normpath(dir2) + if dir2 != dir1: + req.write('test failed') + else: + req.write('test ok') + + return apache.OK + + +def req_allow_methods(req): + + req.allow_methods(["PYTHONIZE"]) + return apache.HTTP_METHOD_NOT_ALLOWED + +def req_get_basic_auth_pw(req): + + LATIN1_SPAM = 'sp\xe1m' + LATIN1_EGGS = '\xe9ggs' + + pw = req.get_basic_auth_pw() + if (req.user == "spam" and pw == "eggs" or + req.user == LATIN1_SPAM and pw == LATIN1_EGGS): + req.write("test ok") + else: + req.write("test failed, user %s, pw %s" % (repr(req.user), repr(pw))) + + return apache.OK + +def req_unauthorized(req): + + pw = req.get_basic_auth_pw() + if req.user == "spam" and pw == "eggs": + req.write("test ok") + return apache.OK + + return apache.HTTP_UNAUTHORIZED + +def req_auth_type(req): + + auth_type = req.auth_type() + if auth_type != "dummy": + req.log_error("auth_type check failed") + req.write("test failed, req.auth_type() returned: %s" % repr(auth_type)) + return apache.DONE + auth_name = req.auth_name() + if auth_name != "blah": + req.log_error("auth_name check failed") + req.write("test failed, req.auth_name() returned: %s" % repr(auth_name)) + return apache.DONE + + if req.phase == "PythonAuthenHandler": + + req.user = "dummy" + req.ap_auth_type = req.auth_type() + + elif req.phase != "PythonAuthzHandler": + + req.write("test ok") + + return apache.OK + +def req_requires(req): + + if req.requires() == ('valid-user',): + req.write("test ok") 
+ return apache.DONE + + req.write("test failed") + return apache.DONE + +def req_document_root(req): + + req.write(req.document_root()) + return apache.OK + +def req_internal_redirect(req): + + req.internal_redirect("/test.int") + + return apache.OK + +def req_internal_redirect_int(req): + # used by req_internal_redirect + + req.prev.write("test ") + req.write("ok") + + return apache.OK + +def req_construct_url(req): + + url = req.construct_url("/index.html") + + if not re.match("^http://test_req_construct_url:[0-9]+/index.html$",url): + req.write("test failed") + else: + req.write("test ok") + + return apache.OK + +def req_read(req): + + s = req.read() + req.write(s) + + return apache.OK + +def req_readline(req): + + s = req.readline() + while s: + req.write(s) + s = req.readline() + + return apache.OK + +def req_readlines(req): + + + if 'SizeHint' in req.headers_in: + lines = req.readlines(int(req.headers_in['SizeHint'])) + else: + lines = req.readlines() + + req.write(b"".join(lines)) + + return apache.OK + +def req_discard_request_body(req): + + s = req.read(10) + if s != b'1234567890': + req.log_error('read() #1 returned %s' % repr(s)) + req.write('test failed') + return apache.OK + + status = req.discard_request_body() + if status != apache.OK: + req.log_error('discard_request_body() returned %d' % status) + return status + + s = req.read() + if s: + req.log_error('read() #2 returned %s' % repr(s)) + req.write('test failed') + return apache.OK + + req.write('test ok') + + return apache.OK + +def req_register_cleanup(req): + + req.cleanup_data = "req_register_cleanup test ok" + req.register_cleanup(cleanup, req) + req.write("registered cleanup that will write to log") + + return apache.OK + +def cleanup(data): + # for req_register_cleanup above + + data.log_error(data.cleanup_data) + +def server_cleanup(data): + # for srv_register_cleanup and apache_register_cleanup below + + apache.log_error(data) + +def req_headers_out(req): + + 
req.headers_out["X-Test-Header"] = "test ok" + req.write("test ok") + + return apache.OK + +def req_headers_out_access(req): + + return apache.OK + +def req_sendfile(req): + + import tempfile + fname = tempfile.mktemp("txt") + f = open(fname, "w") + f.write(" test ok "); + f.close() + + req.sendfile(fname, 2, 7) + + # os.remove(fname) + return apache.OK + +def req_sendfile2(req): + + import tempfile + fname = tempfile.mktemp("txt") + f = open(fname, "w") + f.write("0123456789"*100); + f.close() + + req.sendfile(fname) + + # os.remove(fname) + return apache.OK + +def req_sendfile3(req): + """Check if sendfile handles symlinks properly. + This is only valid on posix systems. + """ + + import tempfile + # note mktemp is deprecated in python 2.3. Should use mkstemp instead. + fname = tempfile.mktemp("txt") + f = open(fname, "w") + f.write("0123456789"*100); + f.close() + fname_symlink = '%s.lnk' % fname + os.symlink(fname, fname_symlink) + req.sendfile(fname_symlink) + os.remove(fname_symlink) + os.remove(fname) + return apache.OK + +def req_handler(req): + if req.phase == "PythonFixupHandler": + req.handler = "mod_python" + req.handler = None + req.handler = "mod_python" + req.add_handler("PythonHandler","tests::req_handler") + return apache.OK + elif req.phase == "PythonHandler": + req.write('test ok') + return apache.OK + else: + req.write('test failed') + return apache.OK + +def req_no_cache(req): + req.no_cache = 1 + req.write('test ok') + return apache.OK + +def req_update_mtime(req): + assert(req.mtime == 0.0) + req.update_mtime(100.0) + assert(req.mtime == 100.0) + req.set_etag() + req.set_last_modified() + req.write('test ok') + return apache.OK + +def util_redirect(req): + from mod_python import util + if req.main: + # Sub request for ErrorDocument. 
+ req.write("test failed") + return apache.DONE + else: + if req.phase == "PythonFixupHandler": + util.redirect(req,location="/dummy",text="test ok") + else: + req.write('test failed') + return apache.OK + +def req_server_get_config(req): + + if req.server.get_config().get("PythonDebug", "0") != "1" or \ + req.get_config().get("PythonDebug", "0") != "0": + req.write('test failed') + else: + req.write('test ok') + + return apache.OK + +def req_server_get_options(req): + + try: + server_options = apache.main_server.get_options() + assert(server_options.get("global","0") == "0") + assert(server_options.get("override","0") == "0") + + server_options = req.connection.base_server.get_options() + assert(server_options.get("global","0") == "0") + assert(server_options.get("override","0") == "0") + + server_options = req.server.get_options() + assert(server_options["global"] == "1") + assert(server_options["override"] == "1") + + request_options = req.get_options() + assert(request_options["global"] == "1") + assert(request_options["override"] == "2") + assert(request_options["local"] == "1") + except: + req.write('test failed') + else: + req.write('test ok') + + return apache.OK + +def fileupload(req): + from mod_python import util + + fields = util.FieldStorage(req) + f = fields.getfirst('testfile') + + if PY2: + import md5 + req.write(md5.new(f.file.read()).hexdigest()) + else: + from hashlib import md5 + req.write(md5(f.file.read()).hexdigest()) + + return apache.OK + +def srv_register_cleanup(req): + + req.server.register_cleanup(req, server_cleanup, "srv_register_cleanup test ok") + req.write("registered server cleanup that will write to log") + + return apache.OK + +def apache_register_cleanup(req): + + apache.register_cleanup(server_cleanup, "apache_register_cleanup test ok") + req.write("registered server cleanup that will write to log") + + return apache.OK + +def apache_exists_config_define(req): + if apache.exists_config_define('FOOBAR'): + req.write('FOOBAR') + 
else: + req.write('NO_FOOBAR') + return apache.OK + +def util_fieldstorage(req): + + from mod_python import util + req.write(repr(util.FieldStorage(req).list)) + return apache.OK + +def postreadrequest(req): + + req.log_error('postreadrequest') + + req.add_common_vars() + + req.subprocess_env['TEST1'] = "'" + req.subprocess_env['TEST2'] = '"' + + req.log_error('subprocess_env = %s' % req.subprocess_env) + req.log_error('subprocess_env.values() = %s' % list(req.subprocess_env.values())) + + for value in req.subprocess_env.values(): + req.log_error('VALUE = %s' % value) + + for item in req.subprocess_env.items(): + req.log_error('ITEM = %s' % (item,)) + + req.log_error('SCRIPT_FILENAME = %s' % req.subprocess_env.get('SCRIPT_FILENAME')) + req.log_error('SCRIPT_FILENAME = %s' % req.subprocess_env['SCRIPT_FILENAME']) + + req.write("test ok") + + return apache.DONE + + +def trans(req): + + req.filename = req.document_root()+"/tests.py" + + return apache.OK + +def import_test(req): + + import sys, os + directory = os.path.dirname(__file__) + assert([os.path.normpath(d) for d in sys.path].count(directory) == 1) + if "dummymodule" in sys.modules: + if "dummymodule::function" not in apache.main_server.get_options(): + req.log_error("dummymodule::function not executed") + req.write("test failed") + else: + req.write("test ok") + else: + req.log_error("dummymodule not found in sys.modules") + req.write("test failed") + + return apache.OK + +def outputfilter(fltr): + + assert(not hasattr(fltr, "non_existent")) + + s = fltr.read() + while s: + fltr.write(s.upper()) + s = fltr.read() + + if s is None: + fltr.close() + + return apache.OK + +def simplehandler(req): + + if req.phase != "PythonHandler": + req.write("test failed") + return apache.OK + + req.write("test ok") + return apache.OK + +def req_add_output_filter(req): + + req.add_output_filter("MP_TEST_FILTER") + + req.write("test ok") + + return apache.OK + +def req_register_output_filter(req): + + 
req.register_output_filter("MP_TEST_FILTER","tests::outputfilter") + + req.add_output_filter("MP_TEST_FILTER") + + req.write("test ok") + + return apache.OK + +def connectionhandler(conn): + + # read whatever + s = conn.readline().strip() + while s: + s = conn.readline().strip() + + # fake an HTTP response + conn.write("HTTP/1.1 200 OK\r\n") + conn.write("Content-Length: 7\r\n\r\n") + conn.write("test ok") + + return apache.OK + +def pipe_ext(req): + + # this is called by publisher + + return "pipe ext" + + +def Cookie_Cookie(req): + + from mod_python import Cookie + + cookies = Cookie.get_cookies(req) + + for k in cookies: + Cookie.add_cookie(req, cookies[k]) + + req.write("test ok") + + return apache.OK + +def Cookie_MarshalCookie(req): + + from mod_python import Cookie + + cookies = Cookie.get_cookies(req, Cookie.MarshalCookie, + secret="secret") + + for k in cookies: + Cookie.add_cookie(req, cookies[k]) + + req.write("test ok") + + return apache.OK + + +def global_lock(req): + + import _apache + + _apache._global_lock(req.server, 1) + time.sleep(1) + _apache._global_unlock(req.server, 1) + + req.write("test ok") + + return apache.OK + +def Session_Session(req): + + from mod_python import Session, Cookie + s = Session.Session(req) + if s.is_new(): + s.save() + + cookies = Cookie.get_cookies(req) + if Session.COOKIE_NAME in cookies and s.is_new(): + req.write(str(cookies[Session.COOKIE_NAME])) + else: + req.write("test ok") + + return apache.OK + +def files_directive(req): + + req.write(str(req.hlist.directory)) + return apache.OK + +none_handler = None + +def server_return_1(req): + raise apache.SERVER_RETURN(apache.OK) + +def server_return_2(req): + req.write("test ok") + return apache.OK + +def phase_status_1(req): + apache.log_error("phase_status_1") + req.phases = [1] + return apache.DECLINED + +def phase_status_2(req): + apache.log_error("phase_status_2") + req.phases.append(2) + req.user = "bogus" + req.ap_auth_type = "bogus" + return apache.OK + +def 
phase_status_3(req): + apache.log_error("phase_status_3") + req.phases.append(3) + return apache.OK + +def phase_status_4(req): + apache.log_error("phase_status_4") + #req.phases.append(4) + return apache.OK + +def phase_status_5(req): + apache.log_error("phase_status_5") + req.phases.append(5) + return apache.DECLINED + +def phase_status_6(req): + apache.log_error("phase_status_6") + req.phases.append(6) + return apache.OK + +def phase_status_7(req): + apache.log_error("phase_status_7") + req.phases.append(7) + return apache.OK + +def phase_status_8(req): + apache.log_error("phase_status_8") + apache.log_error("phases = %s" % req.phases) + if req.phases != [1, 2, 5, 6, 7]: + req.write("test failed") + else: + req.write("test ok") + return apache.OK + +def phase_status_cleanup(req): + apache.log_error("phase_status_cleanup_log_entry") + return apache.OK + +def test_sys_argv(req): + import sys + req.write(repr(sys.argv)) + return apache.OK + +def PythonOption_items(req): + options = list(req.get_options().items()) + + # The tests may using PythonOption mod_python.* in the test configuration + # We need to remove those particular options so they don't interfer + # with this test result. 
+ options = [ o for o in options if not o[0].startswith('mod_python') ] + + options.sort() + req.write(str(options)) + return apache.OK + +def interpreter(req): + req.write(req.interpreter) + return apache.DONE + +def index(req): + return "test ok, interpreter=%s" % req.interpreter + +def test_publisher(req): + return "test ok, interpreter=%s" % req.interpreter + +def test_publisher_auth_nested(req): + def __auth__(req, user, password): + test_globals = test_publisher + req.notes["auth_called"] = "1" + return user == "spam" and password == "eggs" + def __access__(req, user): + req.notes["access_called"] = "1" + return 1 + assert(int(req.notes.get("auth_called",0))) + assert(int(req.notes.get("access_called",0))) + return "test ok, interpreter=%s" % req.interpreter + +class _test_publisher_auth_method_nested: + def method(self, req): + def __auth__(req, user, password): + test_globals = test_publisher + req.notes["auth_called"] = "1" + return user == "spam" and password == "eggs" + def __access__(req, user): + req.notes["access_called"] = "1" + return 1 + assert(int(req.notes.get("auth_called",0))) + assert(int(req.notes.get("access_called",0))) + return "test ok, interpreter=%s" % req.interpreter + +test_publisher_auth_method_nested = _test_publisher_auth_method_nested() + +class OldStyleClassTest: + def __init__(self): + pass + def __call__(self, req): + return "test callable old-style instance ok" + def traverse(self, req): + return "test traversable old-style instance ok" +old_instance = OldStyleClassTest() + +test_dict = {1:1, 2:2, 3:3} +test_dict_keys = test_dict.keys + +def test_dict_iteration(req): + return test_dict_keys() + +def test_generator(req): + c = 0 + while c < 10: + yield c + c += 1 + +def server_side_include(req): + req.ssi_globals = { "data": "test" } + return apache.OK + +class InstanceTest(object): + def __call__(self, req): + return "test callable instance ok" + def traverse(self, req): + return "test traversable instance ok" +instance = 
InstanceTest() + +# Hierarchy traversal tests +class Mapping(object): + def __init__(self,name): + self.name = name + + def __call__(self,req): + return "Called %s"%self.name +hierarchy_root = Mapping("root"); +hierarchy_root.page1 = Mapping("page1") +hierarchy_root.page1.subpage1 = Mapping("subpage1") +hierarchy_root.page2 = Mapping("page2") + +class Mapping2: + pass +hierarchy_root_2 = Mapping2() +hierarchy_root_2.__call__ = index +hierarchy_root_2.page1 = index +hierarchy_root_2.page2 = index + +def _test_table(): + + log = apache.log_error + + log(" starting _test_table") + d = apache.table() + if list(d.keys()) != []: raise TestFailed('{}.keys()') + if ('a' in d) != 0: raise TestFailed("'a' in {}") + if ('a' not in d) != 1: raise TestFailed("'a' not in {}") + if len(d) != 0: raise TestFailed('len({})') + d = {'a': 1, 'b': 2} + if len(d) != 2: raise TestFailed('len(dict)') + k = list(d.keys()) + k.sort() + if k != ['a', 'b']: raise TestFailed('dict keys()') + if 'a' in d and 'b' in d and 'c' not in d: pass + else: raise TestFailed('dict keys()') + if 'a' in d and 'b' in d and 'c' not in d: pass + else: raise TestFailed('dict keys() # in/not in version') + if d['a'] != 1 or d['b'] != 2: raise TestFailed('dict item') + d['c'] = 3 + d['a'] = 4 + if d['c'] != 3 or d['a'] != 4: raise TestFailed('dict item assignment') + del d['b'] + if d != {'a': 4, 'c': 3}: raise TestFailed('dict item deletion') + + # dict.clear() + log(" table.clear()") + d = apache.table() + d['1'] = '1' + d['2'] = '2' + d['3'] = '3' + d.clear() + if d != apache.table(): raise TestFailed('dict clear') + + # dict.update() + log(" table.update()") + d.update({'1':'100'}) + d.update({'2':'20'}) + d.update({'1':'1', '2':'2', '3':'3'}) + if d != apache.table({'1':'1', '2':'2', '3':'3'}): raise TestFailed('dict update') + d.clear() + try: d.update(None) + except AttributeError: pass + else: raise TestFailed('dict.update(None), AttributeError expected') + class SimpleUserDict: + def __init__(self): + 
self.d = {1:1, 2:2, 3:3} + def keys(self): + return list(self.d.keys()) + def __getitem__(self, i): + return self.d[i] + d.update(SimpleUserDict()) + if d != apache.table({1:1, 2:2, 3:3}): raise TestFailed('dict.update(instance)') + d.clear() + class FailingUserDict: + def keys(self): + raise ValueError + try: d.update(FailingUserDict()) + except ValueError: pass + else: raise TestFailed('dict.keys() expected ValueError') + class FailingUserDict: + def keys(self): + class BogonIter: + def __iter__(self): + raise ValueError + return BogonIter() + try: d.update(FailingUserDict()) + except ValueError: pass + else: raise TestFailed('iter(dict.keys()) expected ValueError') + class FailingUserDict: + def keys(self): + class BogonIter: + def __init__(self): + self.i = 1 + def __iter__(self): + return self + def __next__(self): + if self.i: + self.i = 0 + return 'a' + raise ValueError + def next(self): + return self.__next__() + return BogonIter() + def __getitem__(self, key): + return key + try: d.update(FailingUserDict()) + except ValueError: pass + else: raise TestFailed('iter(dict.keys()).next() expected ValueError') + class FailingUserDict: + def keys(self): + class BogonIter: + def __init__(self): + self.i = ord('a') + def __iter__(self): + return self + def __next__(self): + if self.i <= ord('z'): + rtn = chr(self.i) + self.i += 1 + return rtn + raise StopIteration + def next(self): + return self.__next__() + return BogonIter() + def __getitem__(self, key): + raise ValueError + try: d.update(FailingUserDict()) + except ValueError: pass + else: raise TestFailed('dict.update(), __getitem__ expected ValueError') + # dict.copy() + log(" table.copy()") + d = {1:1, 2:2, 3:3} + if d.copy() != {1:1, 2:2, 3:3}: raise TestFailed('dict copy') + if apache.table().copy() != apache.table(): raise TestFailed('empty dict copy') + # dict.get() + log(" table.get()") + d = apache.table() + if d.get('c') is not None: raise TestFailed('missing {} get, no 2nd arg') + if d.get('c', '3') 
!= '3': raise TestFailed('missing {} get, w/ 2nd arg') + d = apache.table({'a' : '1', 'b' : '2'}) + if d.get('c') is not None: raise TestFailed('missing dict get, no 2nd arg') + if d.get('c', '3') != '3': raise TestFailed('missing dict get, w/ 2nd arg') + if d.get('a') != '1': raise TestFailed('present dict get, no 2nd arg') + if d.get('a', '3') != '1': raise TestFailed('present dict get, w/ 2nd arg') + # dict.setdefault() + log(" table.setdefault()") + d = apache.table() + d.setdefault('key0') + if d.setdefault('key0') is not "": + raise TestFailed('missing {} setdefault, no 2nd arg') + if d.setdefault('key0') is not "": + raise TestFailed('present {} setdefault, no 2nd arg') + # dict.popitem() + log(" table.popitem()") + for copymode in -1, +1: + # -1: b has same structure as a + # +1: b is a.copy() + for log2size in range(12): + size = 2**log2size + a = apache.table() + b = apache.table() + for i in range(size): + a[repr(i)] = str(i) + if copymode < 0: + b[repr(i)] = str(i) + if copymode > 0: + b = a.copy() + for i in range(size): + ka, va = ta = a.popitem() + if va != ka: raise TestFailed("a.popitem: %s" % str(ta)) + kb, vb = tb = b.popitem() + if vb != kb: raise TestFailed("b.popitem: %s" % str(tb)) + if copymode < 0 and ta != tb: + raise TestFailed("a.popitem != b.popitem: %s, %s" % ( + str(ta), str(tb))) + if a: raise TestFailed('a not empty after popitems: %s' % str(a)) + if b: raise TestFailed('b not empty after popitems: %s' % str(b)) + + # iteration (just make sure we can iterate without a segfault) + d = apache.table({'a' : '1', 'b' : '2', 'c' : '3'}) + log(" for k in table") + for k in d: + pass + + log(" _test_table test finished") + +def okay(req): + req.write("test ok") + return apache.OK + +def memory(req): + + # NB: This only works on Linux. 
+ + ## warm up + for x in range(10000): + req.write("test ok") + + ## check memory usage before (the unit is pages, usually 4k) + before = list(map(int, open("/proc/self/statm").read().split())) + + for x in range(100000): + req.write("test ok") + req.flush() + + ## check memory usage after + after = list(map(int, open("/proc/self/statm").read().split())) + + req.write("|%s|%s" % (before[0], after[0])) + + return apache.OK diff --git a/test/htdocs/wsgitest.py b/test/htdocs/wsgitest.py new file mode 100644 index 0000000..2034728 --- /dev/null +++ b/test/htdocs/wsgitest.py @@ -0,0 +1,53 @@ + +import sys + +def application(env, start_response): + status = '200 OK' + output = 'test fail\n' + + try: + assert(env['wsgi.input'].__class__.__name__ == 'mp_request') + assert(env['wsgi.errors'] == sys.stderr) + assert(env['wsgi.version'] == (1,0)) + assert(env['wsgi.multithread'] in (True, False)) + assert(env['wsgi.multiprocess'] in (True, False)) + assert(env['wsgi.url_scheme'] == 'http') + assert(env['SCRIPT_NAME'] == '') + assert(env['PATH_INFO'] == '/tests.py') + output = 'test ok\n' + except: + pass + + env['wsgi.errors'].write('written_from_wsgi_test\n') + env['wsgi.errors'].flush() + + response_headers = [('Content-type', 'text/plain'), + ('Content-Length', str(len(output)))] + start_response(status, response_headers) + + return [output] + +def base_uri(env, start_response): + status = '200 OK' + output = 'test fail\n' + + try: + assert(env['wsgi.input'].__class__.__name__ == 'mp_request') + assert(env['wsgi.errors'] == sys.stderr) + assert(env['wsgi.version'] == (1,0)) + assert(env['wsgi.multithread'] in (True, False)) + assert(env['wsgi.multiprocess'] in (True, False)) + assert(env['wsgi.url_scheme'] == 'http') + assert(env['SCRIPT_NAME'] == '/foo') + assert(env['PATH_INFO'] == '/bar') + output = 'test ok\n' + except: + pass + + response_headers = [('Content-type', 'text/plain'), + ('Content-Length', str(len(output)))] + start_response(status, response_headers) + + 
return [output] + + diff --git a/test/test.py b/test/test.py new file mode 100644 index 0000000..4c52251 --- /dev/null +++ b/test/test.py @@ -0,0 +1,3149 @@ + # + # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy + # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation + # + # Licensed under the Apache License, Version 2.0 (the "License"); you + # may not use this file except in compliance with the License. You + # may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + # implied. See the License for the specific language governing + # permissions and limitations under the License. + # + # + +""" + + Writing Tests + + Writing mod_python tests can be a tricky task. This module + attempts to lay out a framework for making the testing process + consistent and quick to implement. + + All tests are based on Python Unit Test framework, it's a good + idea to study the docs for the unittest module before going any + further. + + To write a test, first decide in which of the 3 following categories + it falls: + + o Simple tests that do not require any special server configuration + and can be conducted along with other similar tests all in one + request. + + o Per-Request tests. These tests require a whole separate request + (or several requests) for a complete test. + + o Per-Instance tests. These require restarting the instance of + http and running it in a particular way, perhaps with a special + config to complete the test. An example might be load testing, or + checking for memory leaks. 
+ + There are two modules involved in testing - the one you're looking at + now (test.py), which is responsible for setting up the http config + running it and initiating requests, AND htdocs/tests.py (sorry for + boring names), which is where all mod_python handlers reside. + + To write a Simple test: + + o Look at tests.SimpleTestCase class and the test methods in it, + then write your own following the example. + + o Look at the tests.make_suite function, and make sure your test + is added to the suite in there. + + o Keep in mind that the only way for Simple tests to communicate + with the outside world is via the error log, do not be shy about + writing to it. + + To write a Per-Request test: + + Most, if not all per-request tests require special server configuration + as part of the fixture. To avoid having to restart the server with a + different config (which would, btw, effectively turn this into a per- + instance test), we separate configs by placing them in separate virtual + hosts. This will become clearer if you follow the code. + + o Look at test.PerRequestCase class. + + o Note that for every test there are two methods defined: the test + method itself, plus a method with the same name ending with + "_conf". The _conf methods are supposed to return the virtual + host config necessary for this test. As tests are instantiated, + the configs are appended to a class variable (meaning its shared + across all instances) appendConfig, then before the suite is run, + the httpd config is built and httpd started. Each test will + know to query its own virtual host. This way all tests can be + conducted using a single instance of httpd. + + o Note how the _config methods generate the config - they use the + httpdconf module to generate an object whose string representation + is the config part, simlar to the way HTMLgen produces html. You + do not have to do it this way, but it makes for cleaner code. 
+ + o Every Per-Request test must also have a corresponding handler in + the tests module. The convention is name everything based on the + subject of the test, e.g. the test of req.document_root() will have + a test method in PerRequestCase class called test_req_documet_root, + a config method PerRequestCase.test_req_document_root_conf, the + VirtualHost name will be test_req_document_root, and the handler + in tests.py will be called req_document_root. + + o Note that you cannot use urllib if you have to specify a custom + host: header, which is required for this whole thing to work. + There is a convenience method, vhost_get, which takes the host + name as the first argument, and optionally path as the second + (though that is almost never needed). If vhost_get does not + suffice, use httplib. Note the very useful skip_host=1 argument. + + o Remember to have your test added to the suite in + PerInstanceTestCase.testPerRequestTests + + To write a Per-Instance test: + + o Look at test.PerInstanceTestCase class. + + o You have to start httpd in your test, but no need to stop it, + it will be stopped for you in tearDown() + + o Add the test to the suite in test.suite() method + +""" +from __future__ import print_function +import sys +import os + +PY2 = sys.version[0] == '2' + +try: + import mod_python.version +except: + print ( + "Cannot import mod_python.version. Either you didn't " + "run the ./configure script, or you're running this script " + "in a Win32 environment, in which case you have to make it by hand." 
+ ) + sys.exit() +else: + def testpath(variable,isfile): + value = getattr(mod_python.version,variable,'') + + if isfile: + if os.path.isfile(value): + return True + else: + if os.path.isdir(value): + return True + print('Bad value for mod_python.version.%s : %s'%( + variable, + value + )) + return False + + good = testpath('HTTPD',True) + good = testpath('TESTHOME',False) and good + good = testpath('LIBEXECDIR',False) and good + good = testpath('TEST_MOD_PYTHON_SO',True) and good + if not good: + print("Please check your mod_python/version.py file") + sys.exit() + + del testpath + del good + + +from mod_python.httpdconf import * + +import unittest +if PY2: + from commands import getoutput + import urllib2 + import httplib + from httplib import UNAUTHORIZED + import md5 + from cStringIO import StringIO + from urllib2 import urlopen + from urllib import urlencode +else: + from subprocess import getoutput + import urllib.request, urllib.error + import http.client + from http.client import UNAUTHORIZED + from hashlib import md5 + from io import StringIO, BytesIO, TextIOWrapper + from urllib.request import urlopen + from urllib.parse import urlencode +import shutil +import time +import socket +import tempfile +import base64 +import random + +try: + import threading + THREADS = True +except: + THREADS = False + +HTTPD = mod_python.version.HTTPD +TESTHOME = mod_python.version.TESTHOME +MOD_PYTHON_SO = mod_python.version.TEST_MOD_PYTHON_SO +LIBEXECDIR = mod_python.version.LIBEXECDIR +SERVER_ROOT = TESTHOME +CONFIG = os.path.join(TESTHOME, "conf", "test.conf") +DOCUMENT_ROOT = os.path.join(TESTHOME, "htdocs") +TMP_DIR = os.path.join(TESTHOME, "tmp") +PORT = 0 # this is set in fundUnusedPort() + + +# readBlockSize is required for the test_fileupload_* tests. +# We can't import mod_python.util.readBlockSize from a cmd line +# interpreter, so we'll hard code it here. +# If util.readBlockSize changes, it MUST be changed here as well. 
+# Maybe we should set up a separate test to query the server to +# get the correct readBlockSize? +# +readBlockSize = 65368 + +def findUnusedPort(): + + # bind to port 0 which makes the OS find the next + # unused port. + + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.bind(("127.0.0.1", 0)) + port = s.getsockname()[1] + s.close() + + return port + +def http_connection(conn_str): + if PY2: + return httplib.HTTPConnection(conn_str) + else: + return http.client.HTTPConnection(conn_str) + +def md5_hash(s): + if PY2: + return md5.new(s).hexdigest() + else: + if isinstance(s, str): + s = s.encode('latin1') + return md5(s).hexdigest().encode('latin1') + +def get_ab_path(): + """ Find the location of the ab (apache benchmark) program """ + for name in ['ab', 'ab2', 'ab.exe', 'ab2.exe']: + path = os.path.join(os.path.split(HTTPD)[0], name) + if os.path.exists(path): + return quote_if_space(path) + + return None + +def get_apache_version(): + + print("Checking Apache version....") + httpd = quote_if_space(HTTPD) + stdout = getoutput('%s -v' % (httpd)) + + version_str = None + for line in stdout.splitlines(): + if line.startswith('Server version'): + version_str = line.strip() + break + + if version_str: + version_str = version_str.split('/')[1] + major,minor,patch = version_str.split('.',3) + version = '%s.%s' % (major,minor) + else: + + print("Can't determine Apache version. Assuming 2.0") + version = '2.0' + print(version) + return version + +APACHE_VERSION = get_apache_version() +if not mod_python.version.HTTPD_VERSION.startswith(APACHE_VERSION): + print("ERROR: Build version %s does not match version reported by %s: %s, re-run ./configure?" 
% \ + (mod_python.version.HTTPD_VERSION, HTTPD, APACHE_VERSION)) + sys.exit() + +class HttpdCtrl: + # a mixin providing ways to control httpd + + def checkFiles(self): + + modules = os.path.join(SERVER_ROOT, "modules") + if not os.path.exists(modules): + os.mkdir(modules) + + logs = os.path.join(SERVER_ROOT, "logs") + if os.path.exists(logs): + shutil.rmtree(logs) + os.mkdir(logs) + + # place + if os.path.exists(TMP_DIR): + shutil.rmtree(TMP_DIR) + os.mkdir(TMP_DIR) + + + def makeConfig(self, append=Container()): + + # create config files, etc + + print(" Creating config....") + + self.checkFiles() + + global PORT + PORT = findUnusedPort() + print(" listen port:", PORT) + + # where other modules might be + modpath = LIBEXECDIR + + s = Container( + IfModule("prefork.c", + StartServers("3"), + MaxSpareServers("1")), + IfModule("worker.c", + StartServers("2"), + MaxClients("6"), + MinSpareThreads("1"), + MaxSpareThreads("1"), + ThreadsPerChild("3"), + MaxRequestsPerChild("0")), + IfModule("perchild.c", + NumServers("2"), + StartThreads("2"), + MaxSpareThreads("1"), + MaxThreadsPerChild("2")), + IfModule("mpm_winnt.c", + ThreadsPerChild("5"), + MaxRequestsPerChild("0")), + IfModule("!mod_mime.c", + LoadModule("mime_module %s" % + quote_if_space(os.path.join(modpath, "mod_mime.so")))), + IfModule("!mod_log_config.c", + LoadModule("log_config_module %s" % + quote_if_space(os.path.join(modpath, "mod_log_config.so")))), + IfModule("!mod_dir.c", + LoadModule("dir_module %s" % + quote_if_space(os.path.join(modpath, "mod_dir.so")))), + IfModule("!mod_include.c", + LoadModule("include_module %s" % + quote_if_space(os.path.join(modpath, "mod_include.so")))), + ServerRoot(SERVER_ROOT), + ErrorLog("logs/error_log"), + LogLevel("debug"), + LogFormat(r'"%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined'), + CustomLog("logs/access_log combined"), + TypesConfig("conf/mime.types"), + PidFile("logs/httpd.pid"), + ServerName("127.0.0.1"), + Listen(PORT), + 
Timeout(60), + PythonOption('mod_python.mutex_directory %s' % TMP_DIR), + PythonOption('PythonOptionTest sample_value'), + DocumentRoot(DOCUMENT_ROOT), + LoadModule("python_module %s" % quote_if_space(MOD_PYTHON_SO))) + + if APACHE_VERSION == '2.4': + s.append(Mutex("file:logs")) + else: + s.append(LockFile("logs/accept.lock")) + + if APACHE_VERSION == '2.4': + s.append(IfModule("!mod_unixd.c", + LoadModule("unixd_module %s" % + quote_if_space(os.path.join(modpath, "mod_unixd.so"))))) + s.append(IfModule("!mod_authn_core.c", + LoadModule("authn_core_module %s" % + quote_if_space(os.path.join(modpath, "mod_authn_core.so"))))) + s.append(IfModule("!mod_authz_core.c", + LoadModule("authz_core_module %s" % + quote_if_space(os.path.join(modpath, "mod_authz_core.so"))))) + s.append(IfModule("!mod_authn_file.c", + LoadModule("authn_file_module %s" % + quote_if_space(os.path.join(modpath, "mod_authn_file.so"))))) + s.append(IfModule("!mod_authz_user.c", + LoadModule("authz_user_module %s" % + quote_if_space(os.path.join(modpath, "mod_authz_user.so"))))) + + if APACHE_VERSION in ['2.2', '2.4']: + # mod_auth has been split into mod_auth_basic and some other modules + s.append(IfModule("!mod_auth_basic.c", + LoadModule("auth_basic_module %s" % + quote_if_space(os.path.join(modpath, "mod_auth_basic.so"))))) + + # Default KeepAliveTimeout is 5 for apache 2.2, but 15 in apache 2.0 + # Explicitly set the value so it's the same as 2.0 + s.append(KeepAliveTimeout("15")) + else: + s.append(IfModule("!mod_auth.c", + LoadModule("auth_module %s" % + quote_if_space(os.path.join(modpath, "mod_auth.so"))))) + + + s.append(Comment(" --APPENDED--")) + s.append(append) + + f = open(CONFIG, "w") + f.write(str(s)) + f.close() + + def startHttpd(self,extra=''): + + print(" Starting Apache....") + httpd = quote_if_space(HTTPD) + config = quote_if_space(CONFIG) + cmd = '%s %s -k start -f %s' % (httpd, extra, config) + print(" ", cmd) + os.system(cmd) + time.sleep(1) + self.httpd_running = 1 + + 
def stopHttpd(self): + + print(" Stopping Apache...") + httpd = quote_if_space(HTTPD) + config = quote_if_space(CONFIG) + cmd = '%s -k stop -f %s' % (httpd, config) + print(" ", cmd) + os.system(cmd) + time.sleep(1) + + # Wait for apache to stop by checking for the existence of pid the + # file. If pid file still exists after 20 seconds raise an error. + # This check is here to facilitate testing on the qemu emulator. + # Qemu will run about 1/10 the native speed, so 1 second may + # not be long enough for apache to shut down. + count = 0 + pid_file = os.path.join(os.getcwd(), 'logs/httpd.pid') + while os.path.exists(pid_file): + time.sleep(1) + count += 1 + if count > 20: + # give up - apache refuses to die - or died a horrible + # death and never removed the pid_file. + raise RuntimeError(" Trouble stopping apache") + + self.httpd_running = 0 + +class PerRequestTestCase(unittest.TestCase): + + appendConfig = APACHE_VERSION < '2.4' and Container(NameVirtualHost('*')) or Container() + + def __init__(self, methodName="runTest"): + unittest.TestCase.__init__(self, methodName) + + # add to config + try: + confMeth = getattr(self, methodName+"_conf") + self.__class__.appendConfig.append(confMeth()) + except AttributeError: + pass + + def vhost_get(self, vhost, path="/tests.py"): + + # this is so that tests could easily be staged with curl + curl = "curl --verbose --header 'Host: %s' http://127.0.0.1:%s%s" % (vhost, PORT, path) + print(" $ %s" % curl) + + # allows to specify a custom host: header + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", path, skip_host=1) + conn.putheader("Host", "%s:%s" % (vhost, PORT)) + conn.endheaders() + response = conn.getresponse() + if PY2: + rsp = response.read() + else: + rsp = response.read().decode('latin1') + conn.close() + + return rsp + + def vhost_post_multipart_form_data(self, vhost, path="/tests.py",variables={}, files={}): + # variables is a { name : value } dict + # files is a { name : (filename, 
content) } dict + + # build the POST entity + if PY2: + entity = StringIO() + boundary = "============="+''.join( [ random.choice('0123456789') for x in range(10) ] )+'==' + else: + bio = BytesIO() + entity = TextIOWrapper(bio, encoding='latin1') + boundary = "============="+''.join( [ random.choice('0123456789') for x in range(10) ] )+'==' + + # A part for each variable + for name, value in variables.items(): + entity.write('--') + entity.write(boundary) + entity.write('\r\n') + entity.write('Content-Type: text/plain\r\n') + entity.write('Content-Disposition: form-data;\r\n name="%s"\r\n' % name) + entity.write('\r\n') + entity.write(str(value)) + entity.write('\r\n') + + # A part for each file + for name, filespec in files.items(): + filename, content = filespec + # if content is readable, read it + try: + content = content.read() + except: + pass + + if not isinstance(content, str): # always false on 2.x + content = content.decode('latin1') + + entity.write('--') + entity.write(boundary) + entity.write('\r\n') + entity.write('Content-Type: application/octet-stream\r\n') + entity.write('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (name, filename)) + entity.write('\r\n') + entity.write(content) + entity.write('\r\n') + + # The final boundary + entity.write('--') + entity.write(boundary) + entity.write('--\r\n') + + entity.flush() + if PY2: + entity = entity.getvalue() + else: + entity = bio.getvalue() + + conn = http_connection("127.0.0.1:%s" % PORT) + #conn.set_debuglevel(1000) + conn.putrequest("POST", path, skip_host=1) + conn.putheader("Host", "%s:%s" % (vhost, PORT)) + conn.putheader("Content-Type", 'multipart/form-data; boundary="%s"' % boundary) + conn.putheader("Content-Length", '%s'%(len(entity))) + conn.endheaders() + + start = time.time() + conn.send(entity) + response = conn.getresponse() + rsp = response.read() + conn.close() + print(' --> Send + process + receive took %.3f s'%(time.time()-start)) + + return rsp + + ### Tests begin 
here + + def test_req_document_root_conf(self): + + c = VirtualHost("*", + ServerName("test_req_document_root"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_document_root"), + PythonDebug("On"))) + return c + + + def test_req_document_root(self): + + print("\n * Testing req.document_root()") + rsp = self.vhost_get("test_req_document_root") + + if rsp.upper() != DOCUMENT_ROOT.replace("\\", "/").upper(): + self.fail(repr(rsp)) + + def test_req_add_handler_conf(self): + + c = VirtualHost("*", + ServerName("test_req_add_handler"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_add_handler"), + PythonDebug("On"))) + return c + + def test_req_add_handler(self): + + print("\n * Testing req.add_handler()") + rsp = self.vhost_get("test_req_add_handler") + + if (rsp != "test ok"): + self.fail(repr(rsp)) + + def test_req_add_bad_handler_conf(self): + + c = VirtualHost("*", + ServerName("test_req_add_bad_handler"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_add_bad_handler"), + PythonDebug("On"))) + return c + + def test_req_add_bad_handler(self): + # adding a non-existent handler with req.add_handler should raise + # an exception. 
+ + print("""\n * Testing req.add_handler("PythonHandler", "bad_handler")""") + rsp = self.vhost_get("test_req_add_bad_handler") + + # look for evidence of the exception in the error log + time.sleep(1) + f = open(os.path.join(SERVER_ROOT, "logs/error_log")) + log = f.read() + f.close() + if log.find("contains no 'bad_handler'") == -1: + self.fail("""Could not find "contains no 'bad_handler'" in error_log""") + + def test_req_add_empty_handler_string_conf(self): + + c = VirtualHost("*", + ServerName("test_req_add_empty_handler_string"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_add_empty_handler_string"), + PythonDebug("On"))) + return c + + def test_req_add_empty_handler_string(self): + # Adding an empty string as the handler in req.add_handler + # should raise an exception + + print("""\n * Testing req.add_handler("PythonHandler","")""") + rsp = self.vhost_get("test_req_add_empty_handler_string") + + if (rsp == "no exception"): + self.fail("Expected an exception") + + def test_req_add_handler_empty_phase_conf(self): + + c = VirtualHost("*", + ServerName("test_req_add_handler_empty_phase"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonInterpPerDirective("On"), + PythonFixupHandler("tests::req_add_handler_empty_phase"), + PythonDebug("On"))) + return c + + def test_req_add_handler_empty_phase(self): + # Adding handler to content phase when no handler already + # exists for that phase. 
+ + print("""\n * Testing req.add_handler() for empty phase""") + rsp = self.vhost_get("test_req_add_handler_empty_phase") + + if (rsp != "test ok"): + self.fail(repr(rsp)) + + def test_req_add_handler_directory_conf(self): + + c = VirtualHost("*", + ServerName("test_req_add_handler_directory"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonInterpPerDirective("On"), + PythonFixupHandler("tests::test_req_add_handler_directory"), + PythonDebug("On"))) + return c + + def test_req_add_handler_directory(self): + # Checking that directory is canonicalized and trailing + # slash is added. + + print("""\n * Testing req.add_handler() directory""") + rsp = self.vhost_get("test_req_add_handler_directory") + + if (rsp != "test ok"): + self.fail(repr(rsp)) + + def test_accesshandler_add_handler_to_empty_hl_conf(self): + # Note that there is no PythonHandler specified in the the VirtualHost + # config. We want to see if req.add_handler will work when the + # handler list is empty. 
+ + #PythonHandler("tests::req_add_empty_handler_string"), + c = VirtualHost("*", + ServerName("test_accesshandler_add_handler_to_empty_hl"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonAccessHandler("tests::accesshandler_add_handler_to_empty_hl"), + PythonDebug("On"))) + return c + + def test_accesshandler_add_handler_to_empty_hl(self): + + print("""\n * Testing req.add_handler() when handler list is empty""") + rsp = self.vhost_get("test_accesshandler_add_handler_to_empty_hl") + + if (rsp != "test ok"): + self.fail(repr(rsp)) + + def test_req_allow_methods_conf(self): + + c = VirtualHost("*", + ServerName("test_req_allow_methods"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_allow_methods"), + PythonDebug("On"))) + return c + + def test_req_allow_methods(self): + + print("\n * Testing req.allow_methods()") + + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/tests.py", skip_host=1) + conn.putheader("Host", "%s:%s" % ("test_req_allow_methods", PORT)) + conn.endheaders() + response = conn.getresponse() + server_hdr = response.getheader("Allow", "") + conn.close() + + self.failUnless(server_hdr.find("PYTHONIZE") > -1, "req.allow_methods() didn't work") + + def test_req_unauthorized_conf(self): + + if APACHE_VERSION == '2.4': + c = VirtualHost("*", + ServerName("test_req_unauthorized"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + AuthName("blah"), + AuthType("basic"), + Require("all granted"), + PythonHandler("tests::req_unauthorized"), + PythonDebug("On"))) + else: + c = VirtualHost("*", + ServerName("test_req_unauthorized"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + AuthName("blah"), + AuthType("basic"), + PythonHandler("tests::req_unauthorized"), + PythonDebug("On"))) + + return c + + def test_req_unauthorized(self): + + print("\n * 
Testing whether returning HTTP_UNAUTHORIZED works") + + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/tests.py", skip_host=1) + conn.putheader("Host", "%s:%s" % ("test_req_unauthorized", PORT)) + auth = base64.encodestring(b"spam:eggs").strip() + if PY2: + conn.putheader("Authorization", "Basic %s" % auth) + else: + conn.putheader("Authorization", "Basic %s" % auth.decode("latin1")) + conn.endheaders() + response = conn.getresponse() + rsp = response.read() + conn.close() + + if (rsp != b"test ok"): + self.fail(repr(rsp)) + + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/tests.py", skip_host=1) + conn.putheader("Host", "%s:%s" % ("test_req_unauthorized", PORT)) + auth = base64.encodestring(b"spam:BAD PASSWD").strip() + if PY2: + conn.putheader("Authorization", "Basic %s" % auth) + else: + conn.putheader("Authorization", "Basic %s" % auth.decode("latin1")) + conn.endheaders() + response = conn.getresponse() + rsp = response.read() + conn.close() + + if response.status != UNAUTHORIZED: + self.fail("req.status is not httplib.UNAUTHORIZED, but: %s" % repr(response.status)) + + if rsp == b"test ok": + self.fail("We were supposed to get HTTP_UNAUTHORIZED") + + def test_req_get_basic_auth_pw_conf(self): + + if APACHE_VERSION == '2.4': + c = VirtualHost("*", + ServerName("test_req_get_basic_auth_pw"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + AuthName("blah"), + AuthType("basic"), + Require("all granted"), + PythonHandler("tests::req_get_basic_auth_pw"), + PythonDebug("On"))) + else: + c = VirtualHost("*", + ServerName("test_req_get_basic_auth_pw"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + AuthName("blah"), + AuthType("basic"), + PythonHandler("tests::req_get_basic_auth_pw"), + PythonDebug("On"))) + + return c + + def test_req_get_basic_auth_pw(self): + + print("\n * Testing req.get_basic_auth_pw()") + + conn = 
http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/tests.py", skip_host=1) + conn.putheader("Host", "%s:%s" % ("test_req_get_basic_auth_pw", PORT)) + auth = base64.encodestring(b"spam:eggs").strip() + if PY2: + conn.putheader("Authorization", "Basic %s" % auth) + else: + conn.putheader("Authorization", "Basic %s" % auth.decode("latin1")) + conn.endheaders() + response = conn.getresponse() + rsp = response.read() + conn.close() + + if (rsp != b"test ok"): + self.fail(repr(rsp)) + + def test_req_get_basic_auth_pw_latin1_conf(self): + return self.test_req_get_basic_auth_pw_conf() + + def test_req_get_basic_auth_pw_latin1(self): + + print("\n * Testing req.get_basic_auth_pw_latin1()") + + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/tests.py", skip_host=1) + conn.putheader("Host", "%s:%s" % ("test_req_get_basic_auth_pw", PORT)) + auth = base64.encodestring(b'sp\xe1m:\xe9ggs').strip() + if PY2: + conn.putheader("Authorization", "Basic %s" % auth) + else: + conn.putheader("Authorization", "Basic %s" % auth.decode("latin1")) + conn.endheaders() + response = conn.getresponse() + rsp = response.read() + conn.close() + + if (rsp != b"test ok"): + self.fail(repr(rsp)) + + def test_req_auth_type_conf(self): + + c = VirtualHost("*", + ServerName("test_req_auth_type"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + AuthName("blah"), + AuthType("dummy"), + Require("valid-user"), + PythonAuthenHandler("tests::req_auth_type"), + PythonAuthzHandler("tests::req_auth_type"), + PythonHandler("tests::req_auth_type"), + PythonDebug("On"))) + return c + + def test_req_auth_type(self): + + print("\n * Testing req.auth_type()") + + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/tests.py", skip_host=1) + conn.putheader("Host", "%s:%s" % ("test_req_auth_type", PORT)) + conn.endheaders() + response = conn.getresponse() + rsp = response.read() + conn.close() + + if (rsp != b"test ok"): 
+ self.fail(repr(rsp)) + + def test_req_requires_conf(self): + + c = VirtualHost("*", + ServerName("test_req_requires"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + AuthName("blah"), + AuthType("dummy"), + Require("valid-user"), + PythonAuthenHandler("tests::req_requires"), + PythonDebug("On"))) + + return c + + def test_req_requires(self): + + print("\n * Testing req.requires()") + + rsp = self.vhost_get("test_req_requires") + + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/tests.py", skip_host=1) + conn.putheader("Host", "%s:%s" % ("test_req_requires", PORT)) + auth = base64.encodestring(b"spam:eggs").strip() + if PY2: + conn.putheader("Authorization", "Basic %s" % auth) + else: + conn.putheader("Authorization", "Basic %s" % auth.decode("latin1")) + conn.endheaders() + response = conn.getresponse() + rsp = response.read() + conn.close() + + if (rsp != b"test ok"): + self.fail(repr(rsp)) + + def test_req_internal_redirect_conf(self): + + c = VirtualHost("*", + ServerName("test_req_internal_redirect"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_internal_redirect | .py"), + PythonHandler("tests::req_internal_redirect_int | .int"), + PythonDebug("On"))) + return c + + def test_req_internal_redirect(self): + + print("\n * Testing req.internal_redirect()") + rsp = self.vhost_get("test_req_internal_redirect") + + if rsp != "test ok": + self.fail("internal_redirect") + + def test_req_construct_url_conf(self): + + c = VirtualHost("*", + ServerName("test_req_construct_url"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_construct_url"), + PythonDebug("On"))) + return c + + def test_req_construct_url(self): + + print("\n * Testing req.construct_url()") + rsp = self.vhost_get("test_req_construct_url") + + if rsp != "test ok": + self.fail("construct_url") + + def 
test_req_read_conf(self): + + c = Container(Timeout("5"), + VirtualHost("*", + ServerName("test_req_read"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_read"), + PythonDebug("On")))) + return c + + def test_req_read(self): + + print("\n * Testing req.read()") + + params = b'1234567890'*10000 + print(" writing %d bytes..." % len(params)) + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("POST", "/tests.py", skip_host=1) + conn.putheader("Host", "test_req_read:%s" % PORT) + conn.putheader("Content-Length", str(len(params))) + conn.endheaders() + conn.send(params) + response = conn.getresponse() + rsp = response.read() + conn.close() + + print(" response size: %d\n" % len(rsp)) + if (rsp != params): + self.fail(repr(rsp)) + + print(" read/write ok, now lets try causing a timeout (should be 5 secs)") + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("POST", "/tests.py", skip_host=1) + conn.putheader("Host", "test_req_read:%s" % PORT) + conn.putheader("Content-Length", str(10)) + conn.endheaders() + conn.send(b"123456789") + response = conn.getresponse() + rsp = response.read() + conn.close() + + if rsp.find(b"IOError") < 0 and rsp.find(b"OSError") < 0: + self.fail("timeout test failed") + + + def test_req_readline_conf(self): + + c = VirtualHost("*", + ServerName("test_req_readline"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_readline"), + PythonDebug("On"))) + return c + + def test_req_readline(self): + + print("\n * Testing req.readline()") + + params = (b'1234567890'*3000+b'\n')*4 + print(" writing %d bytes..." 
% len(params)) + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("POST", "/tests.py", skip_host=1) + conn.putheader("Host", "test_req_readline:%s" % PORT) + conn.putheader("Content-Length", str(len(params))) + conn.endheaders() + conn.send(params) + response = conn.getresponse() + rsp = response.read() + conn.close() + + print(" response size: %d\n" % len(rsp)) + if (rsp != params): + self.fail(repr(rsp)) + + def test_req_readlines_conf(self): + + c = VirtualHost("*", + ServerName("test_req_readlines"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_readlines"), + PythonDebug("On"))) + return c + + def test_req_readlines(self): + + print("\n * Testing req.readlines()") + + params = (b'1234567890'*3000+b'\n')*4 + print(" writing %d bytes..." % len(params)) + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("POST", "/tests.py", skip_host=1) + conn.putheader("Host", "test_req_readlines:%s" % PORT) + conn.putheader("Content-Length", str(len(params))) + conn.endheaders() + conn.send(params) + response = conn.getresponse() + rsp = response.read() + conn.close() + + print(" response size: %d\n" % len(rsp)) + if (rsp != params): + self.fail(repr(rsp)) + + print("\n * Testing req.readlines(size_hint=30000)") + + params = (b'1234567890'*3000+b'\n')*4 + print(" writing %d bytes..." 
% len(params)) + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("POST", "/tests.py", skip_host=1) + conn.putheader("Host", "test_req_readlines:%s" % PORT) + conn.putheader("Content-Length", str(len(params))) + conn.putheader("SizeHint", str(30000)) + conn.endheaders() + conn.send(params) + response = conn.getresponse() + rsp = response.read() + conn.close() + + print(" response size: %d\n" % len(rsp)) + if (rsp != (b'1234567890'*3000+b'\n')): + self.fail(repr(rsp)) + + print("\n * Testing req.readlines(size_hint=32000)") + + params = (b'1234567890'*3000+b'\n')*4 + print(" writing %d bytes..." % len(params)) + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("POST", "/tests.py", skip_host=1) + conn.putheader("Host", "test_req_readlines:%s" % PORT) + conn.putheader("Content-Length", str(len(params))) + conn.putheader("SizeHint", str(32000)) + conn.endheaders() + conn.send(params) + response = conn.getresponse() + rsp = response.read() + conn.close() + + print(" response size: %d\n" % len(rsp)) + if (rsp != ((b'1234567890'*3000+b'\n')*2)): + self.fail(repr(rsp)) + + def test_req_discard_request_body_conf(self): + + c = Container(Timeout("5"), + VirtualHost("*", + ServerName("test_req_discard_request_body"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_discard_request_body"), + PythonDebug("On")))) + return c + + def test_req_discard_request_body(self): + + print("\n * Testing req.discard_request_body()") + + params = b'1234567890'*2 + print(" writing %d bytes..." 
% len(params)) + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/tests.py", skip_host=1) + conn.putheader("Host", "test_req_discard_request_body:%s" % PORT) + conn.putheader("Content-Length", str(len(params))) + conn.endheaders() + conn.send(params) + response = conn.getresponse() + rsp = response.read() + conn.close() + + if (rsp != b"test ok"): + self.fail(repr(rsp)) + + def test_req_register_cleanup_conf(self): + + c = VirtualHost("*", + ServerName("test_req_register_cleanup"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_register_cleanup"), + PythonDebug("On"))) + return c + + def test_req_register_cleanup(self): + + print("\n * Testing req.register_cleanup()") + + rsp = self.vhost_get("test_req_register_cleanup") + + # see what's in the log now + time.sleep(1) + f = open(os.path.join(SERVER_ROOT, "logs/error_log")) + log = f.read() + f.close() + if log.find("req_register_cleanup test ok") == -1: + self.fail("Could not find test message in error_log") + + def test_req_headers_out_conf(self): + + c = VirtualHost("*", + ServerName("test_req_headers_out"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_headers_out"), + PythonDebug("On"))) + return c + + def test_req_headers_out(self): + + print("\n * Testing req.headers_out") + + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/test.py", skip_host=1) + conn.putheader("Host", "test_req_headers_out:%s" % PORT) + conn.endheaders() + response = conn.getresponse() + h = response.getheader("x-test-header", None) + response.read() + conn.close() + + if h is None: + self.fail("Could not find x-test-header") + + if h != "test ok": + self.fail("x-test-header is there, but does not contain 'test ok'") + + def test_req_sendfile_conf(self): + + c = VirtualHost("*", + ServerName("test_req_sendfile"), + DocumentRoot(DOCUMENT_ROOT), + 
Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_sendfile"), + PythonDebug("On"))) + + return c + + def test_req_sendfile(self): + + print("\n * Testing req.sendfile() with offset and length") + + rsp = self.vhost_get("test_req_sendfile") + + if (rsp != "test ok"): + self.fail(repr(rsp)) + + def test_req_sendfile2_conf(self): + + c = VirtualHost("*", + ServerName("test_req_sendfile2"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_sendfile2"), + PythonDebug("On"))) + + return c + + def test_req_sendfile2(self): + + print("\n * Testing req.sendfile() without offset and length") + + rsp = self.vhost_get("test_req_sendfile2") + + if (rsp != "0123456789"*100): + self.fail(repr(rsp)) + + def test_req_sendfile3_conf(self): + + c = VirtualHost("*", + ServerName("test_req_sendfile3"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_sendfile3"), + PythonDebug("On"))) + + return c + + def test_req_sendfile3(self): + + if os.name == 'posix': + + print("\n * Testing req.sendfile() for a file which is a symbolic link") + + rsp = self.vhost_get("test_req_sendfile3") + + if (rsp != "0123456789"*100): + self.fail(repr(rsp)) + else: + print("\n * Skipping req.sendfile() for a file which is a symbolic link") + + def test_req_handler_conf(self): + + c = VirtualHost("*", + ServerName("test_req_handler"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + PythonFixupHandler("tests::req_handler"), + PythonDebug("On"))) + return c + + def test_req_handler(self): + + print("\n * Testing req.handler") + + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/", skip_host=1) + conn.putheader("Host", "%s:%s" % ("test_req_handler", PORT)) + conn.endheaders() + response = conn.getresponse() + rsp = response.read() + conn.close() + + if (rsp != b"test ok"): + self.fail(repr(rsp)) + + def 
test_req_no_cache_conf(self): + + c = VirtualHost("*", + ServerName("test_req_no_cache"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_no_cache"), + PythonDebug("On"))) + return c + + def test_req_no_cache(self): + + print("\n * Testing req.no_cache") + + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/tests.py", skip_host=1) + conn.putheader("Host", "%s:%s" % ("test_req_no_cache", PORT)) + conn.endheaders() + response = conn.getresponse() + rsp = response.read() + conn.close() + + if response.getheader("expires", None) is None: + self.fail(repr(response.getheader("expires", None))) + + if (rsp != b"test ok"): + self.fail(repr(rsp)) + + def test_req_update_mtime_conf(self): + + c = VirtualHost("*", + ServerName("test_req_update_mtime"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_update_mtime"), + PythonDebug("On"))) + return c + + def test_req_update_mtime(self): + + print("\n * Testing req.update_mtime") + + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/tests.py", skip_host=1) + conn.putheader("Host", "%s:%s" % ("test_req_update_mtime", PORT)) + conn.endheaders() + response = conn.getresponse() + rsp = response.read() + conn.close() + + if response.getheader("etag", None) is None: + self.fail(repr(response.getheader("etag", None))) + + if response.getheader("last-modified", None) is None: + self.fail(repr(response.getheader("last-modified", None))) + + if (rsp != b"test ok"): + self.fail(repr(rsp)) + + def test_util_redirect_conf(self): + + c = VirtualHost("*", + ServerName("test_util_redirect"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + PythonFixupHandler("tests::util_redirect"), + PythonHandler("tests::util_redirect"), + PythonDebug("On"))) + return c + + def test_util_redirect(self): + + print("\n * Testing util.redirect()") + + conn = 
http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/", skip_host=1) + conn.putheader("Host", "%s:%s" % ("test_util_redirect", PORT)) + conn.endheaders() + response = conn.getresponse() + rsp = response.read() + conn.close() + + if response.status != 302: + self.fail('did not receive 302 status response: %s' % repr(response.status)) + + if response.getheader("location", None) != "/dummy": + self.fail('did not receive correct location for redirection') + + if rsp != b"test ok": + self.fail(repr(rsp)) + + def test_req_server_get_config_conf(self): + + c = VirtualHost("*", + ServerName("test_req_server_get_config"), + DocumentRoot(DOCUMENT_ROOT), + PythonDebug("On"), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_server_get_config"), + PythonDebug("Off"))) + return c + + def test_req_server_get_config(self): + + print("\n * Testing req.server.get_config()") + + rsp = self.vhost_get("test_req_server_get_config") + if (rsp != "test ok"): + self.fail(repr(rsp)) + + def test_req_server_get_options_conf(self): + + c = VirtualHost("*", + ServerName("test_req_server_get_options"), + DocumentRoot(DOCUMENT_ROOT), + PythonDebug("Off"), + PythonOption("global 1"), + PythonOption("override 1"), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::req_server_get_options"), + PythonOption("local 1"), + PythonOption("override 2"), + PythonDebug("On"))) + return c + + def test_req_server_get_options(self): + + print("\n * Testing req.server.get_options()") + + rsp = self.vhost_get("test_req_server_get_options") + if (rsp != "test ok"): + self.fail(repr(rsp)) + + def test_fileupload_conf(self): + + c = VirtualHost("*", + ServerName("test_fileupload"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::fileupload"), + PythonDebug("On"))) + + return c + + def test_fileupload(self): + + print("\n * Testing 1 MB file upload support") + + content = ''.join( 
[ chr(random.randrange(256)) for x in range(1024*1024) ] ) + digest = md5_hash(content) + + rsp = self.vhost_post_multipart_form_data( + "test_fileupload", + variables={'test':'abcd'}, + files={'testfile':('test.txt',content)}, + ) + + if (rsp != digest): + self.fail('1 MB file upload failed, its contents were corrupted. Expected (%s), got (%s)' % (repr(digest), repr(rsp))) + + def test_fileupload_embedded_cr_conf(self): + + c = VirtualHost("*", + ServerName("test_fileupload"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::fileupload"), + PythonDebug("On"))) + + return c + + def test_fileupload_embedded_cr(self): + # Strange things can happen if there is a '\r' character at position + # readBlockSize of a line being read by FieldStorage.read_to_boundary + # where the line length is > readBlockSize. + # This test will expose this problem. + + print("\n * Testing file upload with \\r char in a line at position == readBlockSize") + + content = ( + 'a'*100 + '\r\n' + + 'b'*(readBlockSize-1) + '\r' # trick ! + + 'ccc' + 'd'*100 + '\r\n' + ) + digest = md5_hash(content) + + rsp = self.vhost_post_multipart_form_data( + "test_fileupload", + variables={'test':'abcd'}, + files={'testfile':('test.txt',content)}, + ) + + if (rsp != digest): + self.fail('file upload embedded \\r test failed, its contents were corrupted (%s)'%rsp) + + # The UNIX-HATERS handbook illustrates this problem. Once we've done some additional + # investigation to make sure that our synthetic file used above is correct, + # we can likely remove this conditional test. Also, there is no way to be sure + # that ugh.pdf will always be the same in the future so the test may not be valid + # over the long term. 
+ try: + ugh = open('ugh.pdf','rb') + content = ugh.read() + ugh.close() + except: + print(" * Skipping the test for The UNIX-HATERS handbook file upload.") + print(" To make this test, you need to download ugh.pdf from") + print(" http://web.mit.edu/~simsong/www/ugh.pdf") + print(" into this script's directory.") + else: + print(" * Testing The UNIX-HATERS handbook file upload support") + + digest = md5_hash(content) + + rsp = self.vhost_post_multipart_form_data( + "test_fileupload", + variables={'test':'abcd'}, + files={'testfile':('ugh.pdf',content)}, + ) + + + if (rsp != digest): + self.fail('The UNIX-HATERS handbook file upload failed, its contents was corrupted (%s)'%rsp) + + def test_fileupload_split_boundary_conf(self): + + c = VirtualHost("*", + ServerName("test_fileupload"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::fileupload"), + PythonDebug("On"))) + + return c + + def test_fileupload_split_boundary(self): + # This test is similar to test_fileupload_embedded_cr, but it is possible to + # write an implementation of FieldStorage.read_to_boundary that will pass + # that test but fail this one. + # + # Strange things can happen if the last line in the file being uploaded + # has length == readBlockSize -1. The boundary string marking the end of the + # file (eg. '\r\n--myboundary') will be split between the leading '\r' and the + # '\n'. Some implementations of read_to_boundary we've tried assume that this + # '\r' character is part of the file, instead of the boundary string. The '\r' + # will be appended to the uploaded file, leading to a corrupted file. + + print("\n * Testing file upload where length of last line == readBlockSize - 1") + + content = ( + 'a'*100 + '\r\n' + + 'b'*(readBlockSize-1) # trick ! 
+ ) + digest = md5_hash(content) + + rsp = self.vhost_post_multipart_form_data( + "test_fileupload", + variables={'test':'abcd'}, + files={'testfile':('test.txt',content)}, + ) + + if (rsp != digest): + self.fail('file upload long line test failed, its contents were corrupted (%s)'%rsp) + + print(" * Testing file upload where length of last line == readBlockSize - 1 with an extra \\r") + + content = ( + 'a'*100 + '\r\n' + + 'b'*(readBlockSize-1) + + '\r' # second trick ! + ) + digest = md5_hash(content) + + rsp = self.vhost_post_multipart_form_data( + "test_fileupload", + variables={'test':'abcd'}, + files={'testfile':('test.txt',content)}, + ) + + if (rsp != digest): + self.fail('file upload long line test failed, its contents were corrupted (%s)'%rsp) + + def test_sys_argv_conf(self): + + c = VirtualHost("*", + ServerName("test_sys_argv"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::test_sys_argv"), + PythonDebug("On"))) + + return c + + def test_sys_argv(self): + + print("\n * Testing sys.argv definition") + + rsp = self.vhost_get("test_sys_argv") + + if (rsp != "['mod_python']"): + self.fail(repr(rsp)) + + def test_PythonOption_conf(self): + + c = VirtualHost("*", + ServerName("test_PythonOption"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::PythonOption_items"), + PythonDebug("On"))) + + return c + + def test_PythonOption(self): + + print("\n * Testing PythonOption") + + rsp = self.vhost_get("test_PythonOption") + + if (rsp != "[('PythonOptionTest', 'sample_value')]"): + self.fail(repr(rsp)) + + def test_PythonOption_override_conf(self): + + c = VirtualHost("*", + ServerName("test_PythonOption_override"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::PythonOption_items"), + PythonOption('PythonOptionTest "new_value"'), + PythonOption('PythonOptionTest2 "new_value2"'), 
+ PythonDebug("On"))) + + return c + + def test_PythonOption_override(self): + + print("\n * Testing PythonOption override") + + rsp = self.vhost_get("test_PythonOption_override") + + if (rsp != "[('PythonOptionTest', 'new_value'), ('PythonOptionTest2', 'new_value2')]"): + self.fail(repr(rsp)) + + def test_PythonOption_remove_conf(self): + + c = VirtualHost("*", + ServerName("test_PythonOption_remove"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::PythonOption_items"), + PythonOption('PythonOptionTest ""'), + PythonOption('PythonOptionTest2 "new_value2"'), + PythonDebug("On"))) + + return c + + def test_PythonOption_remove(self): + + print("\n * Testing PythonOption remove") + + rsp = self.vhost_get("test_PythonOption_remove") + + if (rsp != "[('PythonOptionTest2', 'new_value2')]"): + self.fail(repr(rsp)) + + def test_PythonOption_remove2_conf(self): + + c = VirtualHost("*", + ServerName("test_PythonOption_remove2"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::PythonOption_items"), + PythonOption('PythonOptionTest'), + PythonOption('PythonOptionTest2 "new_value2"'), + PythonOption('PythonOptionTest3 new_value3'), + PythonDebug("On"))) + + return c + + def test_PythonOption_remove2(self): + + print("\n * Testing PythonOption remove2") + + rsp = self.vhost_get("test_PythonOption_remove2") + + if (rsp != "[('PythonOptionTest2', 'new_value2'), ('PythonOptionTest3', 'new_value3')]"): + self.fail(repr(rsp)) + + def test_interpreter_per_directive_conf(self): + + c = VirtualHost("*", + ServerName("test_interpreter_per_directive"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + PythonInterpPerDirective('On'), + SetHandler("mod_python"), + PythonHandler("tests::interpreter"), + PythonDebug("On"))) + + return c + + def test_interpreter_per_directive(self): + + print("\n * Testing interpreter per directive") + + interpreter_name = 
(DOCUMENT_ROOT.replace('\\', '/')+'/').upper() + + rsp = self.vhost_get("test_interpreter_per_directive").upper() + if (rsp != interpreter_name): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_interpreter_per_directive", '/subdir/foo.py').upper() + if (rsp != interpreter_name): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_interpreter_per_directive", '/subdir/').upper() + if (rsp != interpreter_name): + self.fail(repr(rsp)) + + def test_interpreter_per_directory_conf(self): + + c = VirtualHost("*", + ServerName("test_interpreter_per_directory"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + PythonInterpPerDirectory('On'), + SetHandler("mod_python"), + PythonFixupHandler("tests::interpreter"), + PythonDebug("On")), + ) + + return c + + def test_interpreter_per_directory(self): + + print("\n * Testing interpreter per directory") + + interpreter_name = (DOCUMENT_ROOT.replace('\\', '/')+'/').upper() + + rsp = self.vhost_get("test_interpreter_per_directory").upper() + if (rsp != interpreter_name): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_interpreter_per_directory", '/subdir/foo.py').upper() + if (rsp != interpreter_name+'SUBDIR/'): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_interpreter_per_directory", '/subdir/').upper() + if (rsp != interpreter_name+'SUBDIR/'): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_interpreter_per_directory", '/subdir').upper() + if (rsp != interpreter_name+'SUBDIR/'): + self.fail(repr(rsp)) + + def test_util_fieldstorage_conf(self): + + c = VirtualHost("*", + ServerName("test_util_fieldstorage"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::util_fieldstorage"), + PythonDebug("On"))) + return c + + def test_util_fieldstorage(self): + + print("\n * Testing util_fieldstorage()") + + params = urlencode([('spam', 1), ('spam', 2), ('eggs', 3), ('bacon', 4)]) + headers = {"Host": "test_util_fieldstorage", + "Content-type": 
"application/x-www-form-urlencoded", + "Accept": "text/plain"} + conn = http_connection("127.0.0.1:%s" % PORT) + conn.request("POST", "/tests.py", params, headers) + response = conn.getresponse() + rsp = response.read() + conn.close() + + if (rsp != "[Field('spam', '1'), Field('spam', '2'), Field('eggs', '3'), Field('bacon', '4')]" and + rsp != b"[Field(b'spam', b'1'), Field(b'spam', b'2'), Field(b'eggs', b'3'), Field(b'bacon', b'4')]"): + self.fail(repr(rsp)) + + def test_postreadrequest_conf(self): + + c = VirtualHost("*", + ServerName("test_postreadrequest"), + DocumentRoot(DOCUMENT_ROOT), + SetHandler("mod_python"), + PythonPath("[r'%s']+sys.path" % DOCUMENT_ROOT), + PythonPostReadRequestHandler("tests::postreadrequest"), + PythonDebug("On")) + return c + + def test_postreadrequest(self): + + print("\n * Testing PostReadRequestHandler") + rsp = self.vhost_get("test_postreadrequest") + + if (rsp != "test ok"): + self.fail(repr(rsp)) + + def test_trans_conf(self): + + c = VirtualHost("*", + ServerName("test_trans"), + DocumentRoot(DOCUMENT_ROOT), + SetHandler("mod_python"), + PythonPath("[r'%s']+sys.path" % DOCUMENT_ROOT), + PythonTransHandler("tests::trans"), + PythonDebug("On")) + return c + + def test_trans(self): + + print("\n * Testing TransHandler") + rsp = self.vhost_get("test_trans") + + if (rsp[0:2] != " #"): # first line in tests.py + self.fail(repr(rsp)) + + def test_import_conf(self): + + # configure apache to import it at startup + c = Container(PythonPath("[r'%s']+sys.path" % DOCUMENT_ROOT), + PythonImport("dummymodule test_import"), + PythonImport("dummymodule::function test_import"), + VirtualHost("*", + ServerName("test_import"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::import_test"), + PythonDebug("On")))) + return c + + def test_import(self): + + print("\n * Testing PythonImport") + rsp = self.vhost_get("test_import") + + if (rsp != "test ok"): + self.fail(repr(rsp)) + + def 
test_outputfilter_conf(self): + + c = VirtualHost("*", + ServerName("test_outputfilter"), + DocumentRoot(DOCUMENT_ROOT), + SetHandler("mod_python"), + PythonPath("[r'%s']+sys.path" % DOCUMENT_ROOT), + PythonHandler("tests::simplehandler"), + PythonOutputFilter("tests::outputfilter MP_TEST_FILTER"), + PythonDebug("On"), + AddOutputFilter("MP_TEST_FILTER .py")) + return c + + def test_outputfilter(self): + + print("\n * Testing PythonOutputFilter") + rsp = self.vhost_get("test_outputfilter") + + if (rsp != "TEST OK"): + self.fail(repr(rsp)) + + def test_req_add_output_filter_conf(self): + + c = VirtualHost("*", + ServerName("test_req_add_output_filter"), + DocumentRoot(DOCUMENT_ROOT), + SetHandler("mod_python"), + PythonPath("[r'%s']+sys.path" % DOCUMENT_ROOT), + PythonHandler("tests::req_add_output_filter"), + PythonOutputFilter("tests::outputfilter MP_TEST_FILTER"), + PythonDebug("On")) + return c + + def test_req_add_output_filter(self): + + print("\n * Testing req.add_output_filter") + rsp = self.vhost_get("test_req_add_output_filter") + + if (rsp != "TEST OK"): + self.fail(repr(rsp)) + + def test_req_register_output_filter_conf(self): + + c = VirtualHost("*", + ServerName("test_req_register_output_filter"), + DocumentRoot(DOCUMENT_ROOT), + SetHandler("mod_python"), + PythonPath("[r'%s']+sys.path" % DOCUMENT_ROOT), + PythonHandler("tests::req_register_output_filter"), + PythonDebug("On")) + return c + + def test_req_register_output_filter(self): + + print("\n * Testing req.register_output_filter") + rsp = self.vhost_get("test_req_register_output_filter") + + if (rsp != "TEST OK"): + self.fail(repr(rsp)) + + def test_connectionhandler_conf(self): + + try: + localip = socket.gethostbyname("localhost") + except: + localip = "127.0.0.1" + + self.conport = findUnusedPort() + c = Container(Listen("%d" % self.conport), + VirtualHost("%s:%d" % (localip, self.conport), + SetHandler("mod_python"), + PythonPath("[r'%s']+sys.path" % DOCUMENT_ROOT), + 
PythonConnectionHandler("tests::connectionhandler"))) + return c + + def test_connectionhandler(self): + + print("\n * Testing PythonConnectionHandler on port %d" % self.conport) + + url = "http://127.0.0.1:%s/tests.py" % self.conport + f = urlopen(url) + if PY2: + rsp = f.read() + else: + rsp = f.read().decode('latin1') + f.close() + + if (rsp != "test ok"): + self.fail(repr(rsp)) + + def test_internal_conf(self): + + c = VirtualHost("*", + ServerName("test_internal"), + ServerAdmin("serveradmin@somewhere.com"), + ErrorLog("logs/error_log"), + ServerPath("some/path"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests"), + PythonOption('PythonOptionTest ""'), + PythonOption('mod_python.mutex_directory ""'), + PythonOption("testing 123"), + PythonDebug("On"))) + return c + + def test_internal(self): + + print("\n * Testing internally (status messages go to error_log)") + + rsp = self.vhost_get("test_internal") + if (rsp[-7:] != "test ok"): + self.fail("Some tests failed, see error_log") + + def test_pipe_ext_conf(self): + + c = VirtualHost("*", + ServerName("test_pipe_ext"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("mod_python.publisher | .py"), + PythonHandler("tests::simplehandler"), + PythonDebug("On"))) + return c + + def test_pipe_ext(self): + + print("\n * Testing | .ext syntax") + + rsp = self.vhost_get("test_pipe_ext", path="/tests.py/pipe_ext") + if (rsp[-8:] != "pipe ext"): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_pipe_ext", path="/tests/anything") + if (rsp[-7:] != "test ok"): + self.fail(repr(rsp)) + + def test_wsgihandler_conf(self): + + c = VirtualHost("*", + ServerName("test_wsgihandler"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("mod_python.wsgi"), + PythonOption("mod_python.wsgi.application wsgitest"), + PythonDebug("On"))) + return c + + def 
test_wsgihandler(self): + + print("\n * Testing mod_python.wsgi") + + rsp = self.vhost_get("test_wsgihandler") + if (rsp[-8:] != "test ok\n"): + self.fail(repr(rsp)) + + # see what's in the log now + time.sleep(0.1) + log = open(os.path.join(SERVER_ROOT, "logs/error_log")).read() + if "written_from_wsgi_test" not in log: + self.fail("string 'written_from_wsgi_test' not found in error log.") + + def test_wsgihandler_location_conf(self): + + c = VirtualHost("*", + ServerName("test_wsgihandler_location"), + DocumentRoot(DOCUMENT_ROOT), + Location("/foo", + SetHandler("mod_python"), + PythonHandler("mod_python.wsgi"), + PythonPath("[r'%s']+sys.path" % DOCUMENT_ROOT), + PythonOption("mod_python.wsgi.application wsgitest::base_uri"), + PythonDebug("On"))) + return c + + def test_wsgihandler_location(self): + + print("\n * Testing mod_python.wsgi") + + rsp = self.vhost_get("test_wsgihandler_location", "/foo/bar") + if (rsp[-8:] != "test ok\n"): + self.fail(repr(rsp)) + + def test_cgihandler_conf(self): + + c = VirtualHost("*", + ServerName("test_cgihandler"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("mod_python.cgihandler"), + PythonDebug("On"))) + return c + + def test_cgihandler(self): + + print("\n * Testing mod_python.cgihandler") + + rsp = self.vhost_get("test_cgihandler", path="/cgitest.py") + + if (rsp[-8:] != "test ok\n"): + self.fail(repr(rsp)) + + def test_psphandler_conf(self): + + c = VirtualHost("*", + ServerName("test_psphandler"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("mod_python.psp"), + PythonDebug("On"))) + return c + + def test_psphandler(self): + + print("\n * Testing mod_python.psp") + + rsp = self.vhost_get("test_psphandler", path="/psptest.psp") + if (rsp[-8:] != "test ok\n"): + self.fail(repr(rsp)) + + def test_psp_parser_conf(self): + + c = VirtualHost("*", + ServerName("test_psp_parser"), + DocumentRoot(DOCUMENT_ROOT), + 
Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("mod_python.psp"), + PythonDebug("On"))) + return c + + def test_psp_parser(self): + + print("\n * Testing mod_python.psp parser") + # lines in psp_parser.psp should look like: + # test::$ + # + # For example: + # test:n:\n$ + # test:t:\t$ + + rsp = self.vhost_get("test_psp_parser", path="/psp_parser.psp") + lines = [ line.strip() for line in rsp.split('$') if line ] + failures = [] + for line in lines: + parts = line.split(':', 2) + if len(parts) < 3: + continue + + t, test_case, test_string = parts[0:3] + if not t.strip().startswith('test'): + continue + expected_result = test_case + # do the substitutions in expected_result + for ss, rs in [('-', '\\'),('CR', '\r'), ('LF', '\n'), ('TB', '\t')]: + expected_result = expected_result.replace(ss, rs) + + if expected_result != test_string: + failures.append(test_case) + #print 'expect{%s} got{%s}' % (expected_result, test_string) + + if failures: + msg = 'psp_parser parse errors for: %s' % (', '.join(failures)) + self.fail(msg) + + def test_psp_error_conf(self): + + c = VirtualHost("*", + ServerName("test_psp_error"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("mod_python.psp"), + PythonOption('mod_python.session.database_directory "%s"' % TMP_DIR), + PythonDebug("On"))) + return c + + def test_psp_error(self): + + print("\n * Testing mod_python.psp error page") + + rsp = self.vhost_get("test_psp_error", path="/psptest_main.psp") + if (rsp.strip().split() != ["okay","fail"]): + self.fail(repr(rsp)) + + def test_Cookie_Cookie_conf(self): + + c = VirtualHost("*", + ServerName("test_Cookie_Cookie"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::Cookie_Cookie"), + PythonDebug("On"))) + return c + + def test_Cookie_Cookie(self): + + print("\n * Testing Cookie.Cookie") + + + conn = http_connection("127.0.0.1:%s" % PORT) + 
conn.putrequest("GET", "/testz.py", skip_host=1) + # this is three cookies, nastily formatted + conn.putheader("Host", "test_Cookie_Cookie:%s" % PORT) + conn.putheader("Cookie", "spam=foo; path=blah;;eggs=bar;") + conn.putheader("Cookie", "bar=foo") + conn.endheaders() + response = conn.getresponse() + setcookie = response.getheader("set-cookie", None) + rsp = response.read() + conn.close() + + if rsp != b"test ok" or ('path=blah' not in setcookie or + 'eggs=bar' not in setcookie or + 'bar=foo' not in setcookie or + 'spam=foo' not in setcookie): + print(repr(rsp)) + print(repr(setcookie)) + self.fail("cookie parsing failed") + + def test_Cookie_MarshalCookie_conf(self): + + c = VirtualHost("*", + ServerName("test_Cookie_MarshalCookie"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::Cookie_Cookie"), + PythonDebug("On"))) + return c + + def test_Cookie_MarshalCookie(self): + + print("\n * Testing Cookie.MarshalCookie") + + mc = "eggs=d049b2b61adb6a1d895646719a3dc30bcwQAAABzcGFt" + + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/testz.py", skip_host=1) + conn.putheader("Host", "test_Cookie_MarshalCookie:%s" % PORT) + conn.putheader("Cookie", mc) + conn.endheaders() + response = conn.getresponse() + setcookie = response.getheader("set-cookie", None) + rsp = response.read() + conn.close() + + if rsp != b"test ok" or setcookie != mc: + print(repr(rsp)) + self.fail("marshalled cookie parsing failed") + + # and now a long MarshalledCookie test ! 
+ + mc = ('test=859690207856ec75fc641a7566894e40c1QAAAB0' + 'aGlzIGlzIGEgdmVyeSBsb25nIHZhbHVlLCBsb25nIGxvb' + 'mcgbG9uZyBsb25nIGxvbmcgc28gbG9uZyBidXQgd2UnbG' + 'wgZmluaXNoIGl0IHNvb24=') + + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/testz.py", skip_host=1) + conn.putheader("Host", "test_Cookie_MarshalCookie:%s" % PORT) + conn.putheader("Cookie", mc) + conn.endheaders() + response = conn.getresponse() + setcookie = response.getheader("set-cookie", None) + rsp = response.read() + conn.close() + + if rsp != b"test ok" or setcookie != mc: + print(repr(rsp)) + self.fail("long marshalled cookie parsing failed") + + def test_Session_Session_conf(self): + + c = VirtualHost("*", + ServerName("test_Session_Session"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::Session_Session"), + PythonOption('mod_python.session.database_directory "%s"' % TMP_DIR), + PythonOption('mod_python.session.application_path "/path"'), + PythonOption('mod_python.session.application_domain "test_Session_Session"'), + PythonDebug("On"))) + return c + + def test_Session_Session(self): + + print("\n * Testing Session.Session") + + conn = http_connection("127.0.0.1:%s" % PORT) + #conn.set_debuglevel(1000) + conn.putrequest("GET", "/tests.py", skip_host=1) + conn.putheader("Host", "test_Session_Session:%s" % PORT) + conn.endheaders() + response = conn.getresponse() + setcookie = response.getheader("set-cookie", None) + rsp = response.read() + conn.close() + + if rsp != b"test ok" or setcookie == None: + self.fail("session did not set a cookie") + + parts = setcookie.split('; ') + fields = {} + for part in parts: + key, value = part.split('=') + fields[key] = value + + if 'path' not in fields or fields['path'] != '/path': + self.fail("session did not contain expected 'path'") + + if 'domain' not in fields or fields['domain'] != 'test_Session_Session': + self.fail("session did not contain expected 'domain'") + + 
conn = http_connection("127.0.0.1:%s" % PORT) + #conn.set_debuglevel(1000) + conn.putrequest("GET", "/tests.py", skip_host=1) + conn.putheader("Host", "test_Session_Session:%s" % PORT) + conn.putheader("Cookie", setcookie) + conn.endheaders() + response = conn.getresponse() + rsp = response.read() + conn.close() + if rsp != b"test ok": + self.fail("session did not accept our cookie: %s" % repr(rsp)) + + def test_Session_illegal_sid_conf(self): + + c = VirtualHost("*", + ServerName("test_Session_Session"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::Session_Session"), + PythonOption('mod_python.session.database_directory "%s"' % TMP_DIR), + PythonDebug("On"))) + return c + + def test_Session_illegal_sid(self): + + print("\n * Testing Session with illegal session id value") + bad_cookie = 'pysid=/path/traversal/attack/bad; path=/' + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/tests.py", skip_host=1) + conn.putheader("Host", "test_Session_Session:%s" % PORT) + conn.putheader("Cookie", bad_cookie) + conn.endheaders() + response = conn.getresponse() + setcookie = response.getheader("set-cookie", None) + status = response.status + conn.close() + if status != 200 or not setcookie: + self.fail("session id with illegal characters not replaced") + + bad_cookie = 'pysid=%s; path=/' % ('abcdef'*64) + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/tests.py", skip_host=1) + conn.putheader("Host", "test_Session_Session:%s" % PORT) + conn.putheader("Cookie", bad_cookie) + conn.endheaders() + response = conn.getresponse() + setcookie = response.getheader("set-cookie", None) + status = response.status + conn.close() + if status != 200 or not setcookie: + self.fail("session id which is too long not replaced") + + def test_files_directive_conf(self): + c = VirtualHost("*", + ServerName("test_files_directive"), + DocumentRoot(DOCUMENT_ROOT), + 
Directory(DOCUMENT_ROOT, + Files("*.py", + SetHandler("mod_python"), + PythonHandler("tests::files_directive"), + PythonDebug("On")))) + return c + + def test_files_directive(self): + + directory = (DOCUMENT_ROOT.replace('\\', '/')+'/').upper() + + print("\n * Testing Files directive") + rsp = self.vhost_get("test_files_directive", path="/tests.py").upper() + + if rsp != directory: + self.fail(repr(rsp)) + + def test_none_handler_conf(self): + c = VirtualHost("*", + ServerName("test_none_handler"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::none_handler"), + PythonDebug("On"))) + return c + + def test_none_handler(self): + + print("\n * Testing None handler") + + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/tests.py", skip_host=1) + conn.putheader("Host", "test_none_handler:%s" % PORT) + conn.endheaders() + response = conn.getresponse() + status = response.status + rsp = response.read() + conn.close() + if status != 500: + print(status, rsp) + self.fail("none handler should generate error") + + def test_server_return_conf(self): + c = VirtualHost("*", + ServerName("test_server_return"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::server_return_1"), + PythonHandler("tests::server_return_2"), + PythonDebug("On"))) + return c + + def test_server_return(self): + + print("\n * Testing SERVER_RETURN") + rsp = self.vhost_get("test_server_return") + + if (rsp != "test ok"): + self.fail(repr(rsp)) + + def test_phase_status_conf(self): + c = VirtualHost("*", + ServerName("test_phase_status"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + AuthType("bogus"), + AuthName("bogus"), + Require("valid-user"), + PythonAuthenHandler("tests::phase_status_1"), + PythonAuthenHandler("tests::phase_status_2"), + PythonAuthenHandler("tests::phase_status_3"), + 
PythonAuthzHandler("tests::phase_status_4"), + PythonFixupHandler("tests::phase_status_5"), + PythonFixupHandler("tests::phase_status_6"), + PythonFixupHandler("tests::phase_status_7"), + PythonHandler("tests::phase_status_8"), + PythonCleanupHandler("tests::phase_status_cleanup"), + PythonDebug("On"))) + return c + + def test_phase_status(self): + + print("\n * Testing phase status") + rsp = self.vhost_get("test_phase_status") + + if (rsp != "test ok"): + self.fail(repr(rsp)) + + # see what's in the log now + time.sleep(0.1) + log = open(os.path.join(SERVER_ROOT, "logs/error_log")).read() + if "phase_status_cleanup_log_entry" not in log: + self.fail("phase_status_cleanup_log_entry not found in logs, cleanup handler never ran?") + + def test_publisher_conf(self): + c = VirtualHost("*", + ServerName("test_publisher"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("mod_python.publisher"), + PythonDebug("On"))) + return c + + def test_publisher(self): + print("\n * Testing mod_python.publisher") + + rsp = self.vhost_get("test_publisher", path="/tests.py") + if (rsp != "test ok, interpreter=test_publisher"): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_publisher", path="/tests.py/index") + if (rsp != "test ok, interpreter=test_publisher"): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_publisher", path="/tests.py/test_publisher") + if (rsp != "test ok, interpreter=test_publisher"): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_publisher", path="/") + if (rsp != "test 1 ok, interpreter=test_publisher"): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_publisher", path="/foobar") + if (rsp != "test 2 ok, interpreter=test_publisher"): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_publisher", path="/tests") + if (rsp != "test ok, interpreter=test_publisher"): + self.fail(repr(rsp)) + + def test_publisher_auth_nested_conf(self): + c = VirtualHost("*", + 
ServerName("test_publisher_auth_nested"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("mod_python.publisher"), + PythonDebug("On"))) + return c + + def test_publisher_auth_nested(self): + print("\n * Testing mod_python.publisher auth nested") + + conn = http_connection("127.0.0.1:%s" % PORT) + #conn.set_debuglevel(1000) + conn.putrequest("GET", "/tests.py/test_publisher_auth_nested", skip_host=1) + conn.putheader("Host", "%s:%s" % ("test_publisher_auth_nested", PORT)) + # base64.encodestring was removed in Python 3.9; use encodebytes on Python 3 + if PY2: + auth = base64.encodestring(b"spam:eggs").strip() + else: + auth = base64.encodebytes(b"spam:eggs").strip() + if PY2: + conn.putheader("Authorization", "Basic %s" % auth) + else: + conn.putheader("Authorization", "Basic %s" % auth.decode("latin1")) + conn.endheaders() + response = conn.getresponse() + rsp = response.read() + conn.close() + + if (rsp != b"test ok, interpreter=test_publisher_auth_nested"): + self.fail(repr(rsp)) + + def test_publisher_auth_method_nested_conf(self): + c = VirtualHost("*", + ServerName("test_publisher_auth_method_nested"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("mod_python.publisher"), + PythonDebug("On"))) + return c + + def test_publisher_auth_method_nested(self): + print("\n * Testing mod_python.publisher auth method nested") + + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/tests.py/test_publisher_auth_method_nested/method", skip_host=1) + conn.putheader("Host", "%s:%s" % ("test_publisher_auth_method_nested", PORT)) + # base64.encodestring was removed in Python 3.9; use encodebytes on Python 3 + if PY2: + auth = base64.encodestring(b"spam:eggs").strip() + else: + auth = base64.encodebytes(b"spam:eggs").strip() + if PY2: + conn.putheader("Authorization", "Basic %s" % auth) + else: + conn.putheader("Authorization", "Basic %s" % auth.decode("latin1")) + conn.endheaders() + response = conn.getresponse() + rsp = response.read() + conn.close() + + if (rsp != b"test ok, interpreter=test_publisher_auth_method_nested"): + self.fail(repr(rsp)) + + def test_publisher_auth_digest_conf(self): + c = VirtualHost("*", +
ServerName("test_publisher_auth_digest"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("mod_python.publisher"), + PythonDebug("On"))) + return c + + def test_publisher_auth_digest(self): + print("\n * Testing mod_python.publisher auth digest compatibility") + + # The contents of the authorization header is not relevant, + # as long as it looks valid. + + conn = http_connection("127.0.0.1:%s" % PORT) + conn.putrequest("GET", "/tests.py/test_publisher", skip_host=1) + conn.putheader("Host", "%s:%s" % ("test_publisher_auth_digest", PORT)) + conn.putheader("Authorization", 'Digest username="Mufasa", realm="testrealm@host.com", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", uri="/dir/index.html", qop=auth, nc=00000001, cnonce="0a4f113b", response="6629fae49393a05397450978507c4ef1", opaque="5ccc069c403ebaf9f0171e9517f40e41"') + conn.endheaders() + response = conn.getresponse() + rsp = response.read() + conn.close() + + if (rsp != b"test ok, interpreter=test_publisher_auth_digest"): + self.fail(repr(rsp)) + + def test_publisher_security_conf(self): + c = VirtualHost("*", + ServerName("test_publisher"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("mod_python.publisher"), + PythonDebug("On"))) + return c + + def test_publisher_security(self): + print("\n * Testing mod_python.publisher security") + + def get_status(path): + conn = http_connection("127.0.0.1:%s" % PORT) + #conn.set_debuglevel(1000) + conn.putrequest("GET", path, skip_host=1) + conn.putheader("Host", "test_publisher:%s" % PORT) + conn.endheaders() + response = conn.getresponse() + status, response = response.status, response.read() + conn.close() + return status, response + + status, response = get_status("/tests.py/_SECRET_PASSWORD") + if status != 403: + self.fail('Vulnerability : underscore prefixed attribute (%i)\n%s' % (status, response)) + + status, response = get_status("/tests.py/__ANSWER") + 
if status != 403: + self.fail('Vulnerability : underscore prefixed attribute (%i)\n%s' % (status, response)) + + status, response = get_status("/tests.py/re") + if status != 403: + self.fail('Vulnerability : module published (%i)\n%s' % (status, response)) + + status, response = get_status("/tests.py/OldStyleClassTest") + if status != 403: + self.fail('Vulnerability : old style class published (%i)\n%s' % (status, response)) + + status, response = get_status("/tests.py/InstanceTest") + if status != 403: + self.fail('Vulnerability : new style class published (%i)\n%s' % (status, response)) + + status, response = get_status("/tests.py/index/func_code") + if status != 403: + self.fail('Vulnerability : function traversal (%i)\n%s' % (status, response)) + + status, response = get_status("/tests.py/old_instance/traverse/func_code") + if status != 403: + self.fail('Vulnerability : old-style method traversal (%i)\n%s' % (status, response)) + + status, response = get_status("/tests.py/instance/traverse/func_code") + if status != 403: + self.fail('Vulnerability : new-style method traversal (%i)\n%s' % (status, response)) + + status, response = get_status("/tests.py/test_dict/keys") + if status != 403: + self.fail('Vulnerability : built-in type traversal (%i)\n%s' % (status, response)) + + status, response = get_status("/tests.py/test_dict_keys") + if status != 403: + self.fail('Vulnerability : built-in type publishing (%i)\n%s' % (status, response)) + + def test_publisher_iterator_conf(self): + c = VirtualHost("*", + ServerName("test_publisher"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("mod_python.publisher"), + PythonDebug("On"))) + return c + + def test_publisher_iterator(self): + print("\n * Testing mod_python.publisher iterators") + + rsp = self.vhost_get("test_publisher", path="/tests.py/test_dict_iteration") + if (rsp != "123"): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_publisher", 
path="/tests.py/test_generator") + if (rsp != "0123456789"): + self.fail(repr(rsp)) + + def test_publisher_hierarchy_conf(self): + c = VirtualHost("*", + ServerName("test_publisher_hierarchy"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("mod_python.publisher"), + PythonDebug("On"))) + return c + + def test_publisher_hierarchy(self): + print("\n * Testing mod_python.publisher hierarchy") + + rsp = self.vhost_get("test_publisher_hierarchy", path="/tests.py/hierarchy_root") + if (rsp != "Called root"): + self.fail(repr(rsp)) + + if PY2: + rsp = self.vhost_get("test_publisher_hierarchy", path="/tests.py/hierarchy_root_2") + if (rsp != "test ok, interpreter=test_publisher_hierarchy"): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_publisher_hierarchy", path="/tests.py/hierarchy_root/page1") + if (rsp != "Called page1"): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_publisher_hierarchy", path="/tests.py/hierarchy_root_2/page1") + if (rsp != "test ok, interpreter=test_publisher_hierarchy"): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_publisher_hierarchy", path="/tests.py/hierarchy_root/page1/subpage1") + if (rsp != "Called subpage1"): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_publisher_hierarchy", path="/tests.py/hierarchy_root/page2") + if (rsp != "Called page2"): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_publisher_hierarchy", path="/tests.py/hierarchy_root_2/page2") + if (rsp != "test ok, interpreter=test_publisher_hierarchy"): + self.fail(repr(rsp)) + + def test_publisher_old_style_instance_conf(self): + c = VirtualHost("*", + ServerName("test_publisher"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("mod_python.publisher"), + PythonDebug("On"))) + return c + + def test_publisher_old_style_instance(self): + print("\n * Testing mod_python.publisher old-style instance publishing") + + rsp = 
self.vhost_get("test_publisher", path="/tests.py/old_instance") + if (rsp != "test callable old-style instance ok"): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_publisher", path="/tests.py/old_instance/traverse") + if (rsp != "test traversable old-style instance ok"): + self.fail(repr(rsp)) + + def test_publisher_instance_conf(self): + c = VirtualHost("*", + ServerName("test_publisher"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("mod_python.publisher"), + PythonDebug("On"))) + return c + + def test_publisher_instance(self): + print("\n * Testing mod_python.publisher instance publishing") + + rsp = self.vhost_get("test_publisher", path="/tests.py/instance") + if (rsp != "test callable instance ok"): + self.fail(repr(rsp)) + + rsp = self.vhost_get("test_publisher", path="/tests.py/instance/traverse") + if (rsp != "test traversable instance ok"): + self.fail(repr(rsp)) + + def test_publisher_cache_conf(self): + c = VirtualHost("*", + ServerName("test_publisher"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("mod_python.publisher"), + PythonDebug("On"))) + return c + + def test_publisher_cache(self): + ## It is not possible to get reliable results with this test + # for mpm-prefork and worker, and in fact it may not be possible + # to get consistent results. + # Therefore this test is currently disabled in the + # testPerRequestTests setup. 
+ + print("\n * Testing mod_python.publisher cache") + + def write_published(): + published = file('htdocs/temp.py','wb') + published.write('import time\n') + published.write('LOAD_TIME = time.time()\n') + published.write('def index(req):\n') + published.write(' return "OK %f"%LOAD_TIME\n') + published.close() + + write_published() + try: + rsp = self.vhost_get("test_publisher", path="/temp.py") + + if not rsp.startswith('OK '): + self.fail(repr(rsp)) + + rsp2 = self.vhost_get("test_publisher", path="/temp.py") + if rsp != rsp2: + self.fail( + "The publisher cache has reloaded a published module" + " even though it wasn't modified !" + ) + + # We wait three seconds to be sure we won't be annoyed + # by any lack of resolution of the stat().st_mtime member. + time.sleep(3) + write_published() + + rsp2 = self.vhost_get("test_publisher", path="/temp.py") + if rsp == rsp2: + self.fail( + "The publisher cache has not reloaded a published module" + " even though it was modified !" + ) + + rsp = self.vhost_get("test_publisher", path="/temp.py") + if rsp != rsp2: + self.fail( + "The publisher cache has reloaded a published module" + " even though it wasn't modified !" 
+ ) + finally: + os.remove('htdocs/temp.py') + + def test_server_side_include_conf(self): + c = VirtualHost("*", + ServerName("test_server_side_include"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + Options("+Includes"), + AddType("text/html .shtml"), + AddOutputFilter("INCLUDES .shtml"), + PythonFixupHandler("tests::server_side_include"), + PythonDebug("On"))) + return c + + def test_server_side_include(self): + + print("\n * Testing server side include") + rsp = self.vhost_get("test_server_side_include", path="/ssi.shtml") + + rsp = rsp.strip() + + if (rsp != "test ok"): + self.fail(repr(rsp)) + + def test_memory_conf(self): + + c = VirtualHost("*", + ServerName("test_memory"), + DocumentRoot(DOCUMENT_ROOT), + Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::memory"), + PythonDebug("On"))) + return c + + + def test_memory(self): + + # Note: This test will fail on Apache 2.2 because of a bug, + # but will pass on 2.4 where it is fixed (2.4 reuses the + # brigade on ap_rflush() rather than creating a new one each + # time). http://modpython.org/pipermail/mod_python/2007-July/023974.html + + print("\n * Testing req.write() and req.flush() memory usage (100,000 iterations)") + rsp = self.vhost_get("test_memory") + + before, after = list(map(int, rsp.split("|")[1:])) + + if before != after: + self.fail("Memory before: %s, memory after: %s" % (before, after)) + +class PerInstanceTestCase(unittest.TestCase, HttpdCtrl): + # this is a test case which requires a complete + # restart of httpd (e.g. 
we're using a fancy config) + + def tearDown(self): + if self.httpd_running: + self.stopHttpd() + + def testLoadModule(self): + + print("\n* Testing LoadModule") + + self.makeConfig() + self.startHttpd() + + f = urlopen("http://127.0.0.1:%s/tests.py" % PORT) + server_hdr = f.info()["Server"] + f.close() + self.failUnless(server_hdr.find("Python") > -1, + "%s does not appear to load, Server header does not contain Python" + % MOD_PYTHON_SO) + + def testVersionCheck(self): + + print("\n* Testing C/Py version mismatch warning") + + c = Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::okay"), + PythonDebug("On")) + self.makeConfig(c) + + self.startHttpd() + urlopen("http://127.0.0.1:%s/tests.py" % PORT) + self.stopHttpd() + + # see what's in the log now + time.sleep(0.1) + log = open(os.path.join(SERVER_ROOT, "logs/error_log")).read() + if "mod_python version mismatch" in log: + self.fail("version mismatch found in logs, but versions should be same?") + + from distutils.sysconfig import get_python_lib + version_path = os.path.join(get_python_lib(), "mod_python", "version.py") + + # the rest of this test requires write perms to site-packages/mod_python + if os.access(version_path, os.W_OK): + + # change the version to not match + v = open(version_path).read() + wrong_v = v + "\nversion = 'WRONG VERSION'\n" + open(version_path, "w").write(wrong_v) + + try: + self.startHttpd() + urlopen("http://127.0.0.1:%s/tests.py" % PORT) + self.stopHttpd() + + time.sleep(0.1) + log = open(os.path.join(SERVER_ROOT, "logs/error_log")).read() + if "mod_python version mismatch" not in log: + self.fail("version are different, no version mismatch found in logs") + finally: + # restore version.py + open(version_path, "w").write(v) + + def test_global_lock(self): + + print("\n * Testing _global_lock") + + c = Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::global_lock"), + PythonDebug("On")) + + self.makeConfig(c) + + self.startHttpd() 
+ + f = urlopen("http://127.0.0.1:%s/tests.py" % PORT) + if PY2: + rsp = f.read() + else: + rsp = f.read().decode('latin1') + f.close() + + if (rsp != "test ok"): + self.fail(repr(rsp)) + + # if the mutex works, this test will take at least 5 secs + ab = get_ab_path() + if not ab: + print(" Can't find ab. Skipping _global_lock test") + return + + t1 = time.time() + print(" ", time.ctime()) + if os.name == "nt": + cmd = '%s -c 5 -n 5 http://127.0.0.1:%s/tests.py > NUL:' \ + % (ab, PORT) + else: + cmd = '%s -c 5 -n 5 http://127.0.0.1:%s/tests.py > /dev/null' \ + % (ab, PORT) + print(" ", cmd) + os.system(cmd) + print(" ", time.ctime()) + t2 = time.time() + if (t2 - t1) < 5: + self.fail("global_lock is broken (too quick): %f" % (t2 - t1)) + + def testPerRequestTests(self): + + print("\n* Running the per-request test suite...") + + perRequestSuite = unittest.TestSuite() + perRequestSuite.addTest(PerRequestTestCase("test_req_document_root")) + perRequestSuite.addTest(PerRequestTestCase("test_req_add_handler")) + perRequestSuite.addTest(PerRequestTestCase("test_req_add_bad_handler")) + perRequestSuite.addTest(PerRequestTestCase("test_req_add_empty_handler_string")) + perRequestSuite.addTest(PerRequestTestCase("test_req_add_handler_empty_phase")) + perRequestSuite.addTest(PerRequestTestCase("test_req_add_handler_directory")) + perRequestSuite.addTest(PerRequestTestCase("test_accesshandler_add_handler_to_empty_hl")) + perRequestSuite.addTest(PerRequestTestCase("test_req_allow_methods")) + perRequestSuite.addTest(PerRequestTestCase("test_req_unauthorized")) + perRequestSuite.addTest(PerRequestTestCase("test_req_get_basic_auth_pw")) + perRequestSuite.addTest(PerRequestTestCase("test_req_get_basic_auth_pw_latin1")) + perRequestSuite.addTest(PerRequestTestCase("test_req_auth_type")) + if APACHE_VERSION != '2.4': + perRequestSuite.addTest(PerRequestTestCase("test_req_requires")) + perRequestSuite.addTest(PerRequestTestCase("test_req_internal_redirect")) + 
perRequestSuite.addTest(PerRequestTestCase("test_req_construct_url")) + perRequestSuite.addTest(PerRequestTestCase("test_req_read")) + perRequestSuite.addTest(PerRequestTestCase("test_req_readline")) + perRequestSuite.addTest(PerRequestTestCase("test_req_readlines")) + perRequestSuite.addTest(PerRequestTestCase("test_req_discard_request_body")) + perRequestSuite.addTest(PerRequestTestCase("test_req_register_cleanup")) + perRequestSuite.addTest(PerRequestTestCase("test_req_headers_out")) + perRequestSuite.addTest(PerRequestTestCase("test_req_sendfile")) + perRequestSuite.addTest(PerRequestTestCase("test_req_sendfile2")) + perRequestSuite.addTest(PerRequestTestCase("test_req_sendfile3")) + perRequestSuite.addTest(PerRequestTestCase("test_req_handler")) + perRequestSuite.addTest(PerRequestTestCase("test_req_no_cache")) + perRequestSuite.addTest(PerRequestTestCase("test_req_update_mtime")) + perRequestSuite.addTest(PerRequestTestCase("test_util_redirect")) + perRequestSuite.addTest(PerRequestTestCase("test_req_server_get_config")) + perRequestSuite.addTest(PerRequestTestCase("test_req_server_get_options")) + perRequestSuite.addTest(PerRequestTestCase("test_fileupload")) + perRequestSuite.addTest(PerRequestTestCase("test_fileupload_embedded_cr")) + perRequestSuite.addTest(PerRequestTestCase("test_fileupload_split_boundary")) + perRequestSuite.addTest(PerRequestTestCase("test_sys_argv")) + perRequestSuite.addTest(PerRequestTestCase("test_PythonOption_override")) + perRequestSuite.addTest(PerRequestTestCase("test_PythonOption_remove")) + perRequestSuite.addTest(PerRequestTestCase("test_PythonOption_remove2")) + perRequestSuite.addTest(PerRequestTestCase("test_util_fieldstorage")) + perRequestSuite.addTest(PerRequestTestCase("test_postreadrequest")) + perRequestSuite.addTest(PerRequestTestCase("test_trans")) + perRequestSuite.addTest(PerRequestTestCase("test_outputfilter")) + perRequestSuite.addTest(PerRequestTestCase("test_req_add_output_filter")) + 
perRequestSuite.addTest(PerRequestTestCase("test_req_register_output_filter")) + perRequestSuite.addTest(PerRequestTestCase("test_connectionhandler")) + perRequestSuite.addTest(PerRequestTestCase("test_import")) + perRequestSuite.addTest(PerRequestTestCase("test_pipe_ext")) + perRequestSuite.addTest(PerRequestTestCase("test_cgihandler")) + perRequestSuite.addTest(PerRequestTestCase("test_psphandler")) + perRequestSuite.addTest(PerRequestTestCase("test_psp_parser")) + perRequestSuite.addTest(PerRequestTestCase("test_psp_error")) + perRequestSuite.addTest(PerRequestTestCase("test_Cookie_Cookie")) + perRequestSuite.addTest(PerRequestTestCase("test_Cookie_MarshalCookie")) + perRequestSuite.addTest(PerRequestTestCase("test_Session_Session")) + perRequestSuite.addTest(PerRequestTestCase("test_Session_illegal_sid")) + perRequestSuite.addTest(PerRequestTestCase("test_interpreter_per_directive")) + perRequestSuite.addTest(PerRequestTestCase("test_interpreter_per_directory")) + perRequestSuite.addTest(PerRequestTestCase("test_files_directive")) + perRequestSuite.addTest(PerRequestTestCase("test_none_handler")) + perRequestSuite.addTest(PerRequestTestCase("test_server_return")) + perRequestSuite.addTest(PerRequestTestCase("test_phase_status")) + perRequestSuite.addTest(PerRequestTestCase("test_publisher")) + perRequestSuite.addTest(PerRequestTestCase("test_publisher_auth_nested")) + perRequestSuite.addTest(PerRequestTestCase("test_publisher_auth_method_nested")) + perRequestSuite.addTest(PerRequestTestCase("test_publisher_auth_digest")) + perRequestSuite.addTest(PerRequestTestCase("test_publisher_old_style_instance")) + perRequestSuite.addTest(PerRequestTestCase("test_publisher_instance")) + perRequestSuite.addTest(PerRequestTestCase("test_publisher_security")) + # perRequestSuite.addTest(PerRequestTestCase("test_publisher_iterator")) + perRequestSuite.addTest(PerRequestTestCase("test_publisher_hierarchy")) + 
perRequestSuite.addTest(PerRequestTestCase("test_server_side_include")) + if APACHE_VERSION == '2.4' and sys.platform.startswith("linux") and THREADS: + perRequestSuite.addTest(PerRequestTestCase("test_memory")) + perRequestSuite.addTest(PerRequestTestCase("test_wsgihandler")) + perRequestSuite.addTest(PerRequestTestCase("test_wsgihandler_location")) + + # test_publisher_cache does not work correctly for mpm-prefork/worker + # and it may not be possible to get a reliable test for all + # configurations, so disable it. + # perRequestSuite.addTest(PerRequestTestCase("test_publisher_cache")) + + # this must be last so its error_log is not overwritten + perRequestSuite.addTest(PerRequestTestCase("test_internal")) + + self.makeConfig(PerRequestTestCase.appendConfig) + self.startHttpd() + + tr = unittest.TextTestRunner() + result = tr.run(perRequestSuite) + + self.failUnless(result.wasSuccessful()) + + def test_srv_register_cleanup(self): + + print("\n* Testing server.register_cleanup()...") + + c = Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::srv_register_cleanup"), + PythonDebug("On")) + + self.makeConfig(c) + + self.startHttpd() + + f = urlopen("http://127.0.0.1:%s/tests.py" % PORT) + f.read() + f.close() + + time.sleep(2) + + self.stopHttpd() + + # see what's in the log now + time.sleep(2) + f = open(os.path.join(SERVER_ROOT, "logs/error_log")) + log = f.read() + f.close() + if log.find("srv_register_cleanup test ok") == -1: + self.fail("Could not find test message in error_log") + + def test_apache_register_cleanup(self): + + print("\n* Testing apache.register_cleanup()...") + + c = Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::apache_register_cleanup"), + PythonDebug("On")) + + self.makeConfig(c) + + self.startHttpd() + + f = urlopen("http://127.0.0.1:%s/tests.py" % PORT) + f.read() + f.close() + + time.sleep(2) + + self.stopHttpd() + + # see what's in the log now + time.sleep(2) + f = 
open(os.path.join(SERVER_ROOT, "logs/error_log")) + log = f.read() + f.close() + if log.find("apache_register_cleanup test ok") == -1: + self.fail("Could not find test message in error_log") + + def test_apache_exists_config_define(self): + + print("\n* Testing apache.exists_config_define()...") + + c = Directory(DOCUMENT_ROOT, + SetHandler("mod_python"), + PythonHandler("tests::apache_exists_config_define"), + PythonDebug("On")) + + self.makeConfig(c) + + self.startHttpd() + + f = urlopen("http://127.0.0.1:%s/tests.py" % PORT) + if PY2: + rsp = f.read() + else: + rsp = f.read().decode('latin1') + f.close() + + self.stopHttpd() + + if rsp != 'NO_FOOBAR': + self.fail('Failure on apache.exists_config_define() : %s'%rsp) + + self.startHttpd(extra="-DFOOBAR") + + f = urlopen("http://127.0.0.1:%s/tests.py" % PORT) + if PY2: + rsp = f.read() + else: + rsp = f.read().decode('latin1') + f.close() + f.close() + + self.stopHttpd() + + if rsp != 'FOOBAR': + self.fail('Failure on apache.exists_config_define() : %s'%rsp) + +def suite(): + + mpTestSuite = unittest.TestSuite() + mpTestSuite.addTest(PerInstanceTestCase("testLoadModule")) + mpTestSuite.addTest(PerInstanceTestCase("testVersionCheck")) + mpTestSuite.addTest(PerInstanceTestCase("test_srv_register_cleanup")) + mpTestSuite.addTest(PerInstanceTestCase("test_apache_register_cleanup")) + mpTestSuite.addTest(PerInstanceTestCase("test_apache_exists_config_define")) + mpTestSuite.addTest(PerInstanceTestCase("test_global_lock")) + mpTestSuite.addTest(PerInstanceTestCase("testPerRequestTests")) + return mpTestSuite + +tr = unittest.TextTestRunner() +tr.run(suite()) +