This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "IPFire 3.x development tree".
The branch, master has been updated
       via  cd5121cd427656f57e18517c087c48424a228253 (commit)
       via  b701d96ffe3baafbc8355635064bb179e9c30c12 (commit)
       via  36c41da7e922e9ca0c6b8dcc842f6b033019b676 (commit)
       via  9ffc9c2c282c7615ca704bbeaea9819a28e4c1c1 (commit)
       via  f79f24feacb8be7a26333f36e25c97ef43fefc39 (commit)
       via  641d349548379f23d84ced1e5308a20360079b40 (commit)
       via  1336997e2503d089a5f5f89b91c0b965b98c5971 (commit)
      from  8e72a7c5978061f0cc0c6f3f5f23def7559623e1 (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log -----------------------------------------------------------------
commit cd5121cd427656f57e18517c087c48424a228253
Author: Michael Tremer <michael.tremer@ipfire.org>
Date:   Wed Feb 23 22:37:51 2011 +0100

coreutils: Add gmp and libcap support.
commit b701d96ffe3baafbc8355635064bb179e9c30c12
Author: Michael Tremer <michael.tremer@ipfire.org>
Date:   Wed Feb 23 22:32:18 2011 +0100

curl: Fix curl-config --libs, which printed all the different kinds of libs that curl is linked against.
commit 36c41da7e922e9ca0c6b8dcc842f6b033019b676
Author: Michael Tremer <michael.tremer@ipfire.org>
Date:   Wed Feb 23 22:31:28 2011 +0100

nss: Add -devel dependency nspr-devel.
commit 9ffc9c2c282c7615ca704bbeaea9819a28e4c1c1
Author: Michael Tremer <michael.tremer@ipfire.org>
Date:   Wed Feb 23 22:21:53 2011 +0100

qt: Fix build dependencies.
commit f79f24feacb8be7a26333f36e25c97ef43fefc39
Author: Michael Tremer <michael.tremer@ipfire.org>
Date:   Wed Feb 23 22:20:55 2011 +0100

python-urlgrabber: Update to 3.9.1 (with fixes from upstream).
commit 641d349548379f23d84ced1e5308a20360079b40
Author: Michael Tremer <michael.tremer@ipfire.org>
Date:   Wed Feb 23 22:20:27 2011 +0100

python-pycurl: New package.
commit 1336997e2503d089a5f5f89b91c0b965b98c5971
Author: Michael Tremer <michael.tremer@ipfire.org>
Date:   Wed Feb 23 00:40:57 2011 +0100

linux-atm: Fix build dependencies (flex-devel).
-----------------------------------------------------------------------
Summary of changes:
 pkgs/core/coreutils/coreutils.nm                   |    6 +-
 pkgs/core/curl/curl.nm                             |   13 +-
 .../curl/patches/0101-curl-7.21.1-multilib.patch   |   64 ++
 pkgs/core/linux-atm/linux-atm.nm                   |    2 +-
 pkgs/core/nss/nss.nm                               |    2 +-
 .../python-pycurl-fix-do_curl_reset-refcount.patch |   24 +
 .../patches/python-pycurl-no-static-libs.patch0    |   12 +
 .../python-pycurl.nm}                              |   33 +-
 .../patches/urlgrabber-HEAD.patch                  |  701 ++++++++++++++++++++
 pkgs/core/python-urlgrabber/python-urlgrabber.nm   |    4 +-
 pkgs/core/qt/qt.nm                                 |    9 +-
 11 files changed, 841 insertions(+), 29 deletions(-)
 create mode 100644 pkgs/core/curl/patches/0101-curl-7.21.1-multilib.patch
 create mode 100644 pkgs/core/python-pycurl/patches/python-pycurl-fix-do_curl_reset-refcount.patch
 create mode 100644 pkgs/core/python-pycurl/patches/python-pycurl-no-static-libs.patch0
 copy pkgs/core/{python-decorator/python-decorator.nm => python-pycurl/python-pycurl.nm} (71%)
 create mode 100644 pkgs/core/python-urlgrabber/patches/urlgrabber-HEAD.patch
Difference in files:
diff --git a/pkgs/core/coreutils/coreutils.nm b/pkgs/core/coreutils/coreutils.nm
index 6222b9b..a9ca919 100644
--- a/pkgs/core/coreutils/coreutils.nm
+++ b/pkgs/core/coreutils/coreutils.nm
@@ -26,7 +26,7 @@ include $(PKGROOT)/Include
 
 PKG_NAME = coreutils
 PKG_VER = 8.9
-PKG_REL = 0
+PKG_REL = 1
 
 PKG_MAINTAINER =
 PKG_GROUP = System/Base
@@ -39,8 +39,8 @@ define PKG_DESCRIPTION
 	the old GNU fileutils, sh-utils, and textutils packages.
 endef
 
-PKG_BUILD_DEPS+= autoconf automake libacl-devel libattr-devel libcap \
-	libselinux-devel ncurses-devel e2fsprogs pam-devel
+PKG_BUILD_DEPS+= autoconf automake e2fsprogs-devel gmp-devel libacl-devel \
+	libattr-devel libcap-devel libselinux-devel ncurses-devel pam-devel
 
 PKG_TARBALL = $(THISAPP).tar.gz
diff --git a/pkgs/core/curl/curl.nm b/pkgs/core/curl/curl.nm
index 9085941..aceda4f 100644
--- a/pkgs/core/curl/curl.nm
+++ b/pkgs/core/curl/curl.nm
@@ -26,7 +26,7 @@ include $(PKGROOT)/Include
 
 PKG_NAME = curl
 PKG_VER = 7.21.2
-PKG_REL = 0
+PKG_REL = 1
 
 PKG_MAINTAINER =
 PKG_GROUP = Application/Internet
@@ -34,8 +34,8 @@ PKG_URL = http://www.curl.haxx.se/
 PKG_LICENSE = MIT
 PKG_SUMMARY = A utility for getting files from remote servers (FTP, HTTP, and others).
 
-PKG_BUILD_DEPS+= autoconf automake libidn-devel libssh2-devel libtool \
-	openldap-devel openssl-devel pkg-config zlib-devel
+PKG_BUILD_DEPS+= autoconf automake groff libidn-devel libssh2-devel libtool \
+	nss-devel openldap-devel pkg-config zlib-devel
 
 define PKG_DESCRIPTION
 	cURL is a tool for getting files from HTTP, FTP, FILE, LDAP, LDAPS, \
@@ -58,15 +58,20 @@ PKG_DEPS-libcurl-devel = $(PKG_DEPS-$(PKG_NAME)-devel)
 PKG_BUILD_DEPS-libcurl-devel = $(PKG_BUILD_DEPS-$(PKG_NAME)-devel)
 PKG_SUMMARY-libcurl-devel = $(PKG_SUMMARY-$(PKG_NAME)-devel)
 PKG_FILES-libcurl-devel = $(PKG_FILES-$(PKG_NAME)-devel)
+PKG_PROVIDES-libcurl-devel = curl-devel
 
 CONFIGURE_OPTIONS += \
+	--enable-hidden-symbols \
 	--disable-static \
 	--with-ca-bundle=/etc/pki/tls/certs/ca-bundle.crt \
 	--with-libidn \
 	--with-libssh2 \
 	--with-nss \
 	--enable-ldaps \
-	--enable-ipv6
+	--enable-ipv6 \
+	--enable-manual \
+	--enable-threaded-resolver \
+	--without-ssl
 
 define STAGE_PREPARE_CMDS
 	cd $(DIR_APP) && autoreconf -vfi
diff --git a/pkgs/core/curl/patches/0101-curl-7.21.1-multilib.patch b/pkgs/core/curl/patches/0101-curl-7.21.1-multilib.patch
new file mode 100644
index 0000000..83c1cd8
--- /dev/null
+++ b/pkgs/core/curl/patches/0101-curl-7.21.1-multilib.patch
@@ -0,0 +1,64 @@
+ curl-config.in |   22 ++++------------------
+ libcurl.pc.in  |    1 +
+ 2 files changed, 5 insertions(+), 18 deletions(-)
+
+diff --git a/curl-config.in b/curl-config.in
+index ebda129..b404827 100644
+--- a/curl-config.in
++++ b/curl-config.in
+@@ -43,7 +43,6 @@ Available values for OPTION include:
+   --libs      library linking information
+   --prefix    curl install prefix
+   --protocols newline separated list of enabled protocols
+-  --static-libs static libcurl library linking information
+   --version   output version information
+   --vernum    output the version information as a number (hexadecimal)
+ EOF
+@@ -74,7 +73,7 @@ while test $# -gt 0; do
+         ;;
+ 
+     --cc)
+-        echo "@CC@"
++        echo "gcc"
+         ;;
+ 
+     --prefix)
+@@ -136,25 +135,12 @@ while test $# -gt 0; do
+         ;;
+ 
+     --libs)
+-        if test "X@libdir@" != "X/usr/lib" -a "X@libdir@" != "X/usr/lib64"; then
+-           CURLLIBDIR="-L@libdir@ "
+-        else
+-           CURLLIBDIR=""
+-        fi
+-        if test "X@REQUIRE_LIB_DEPS@" = "Xyes"; then
+-           echo ${CURLLIBDIR}-lcurl @LDFLAGS@ @LIBCURL_LIBS@ @LIBS@
+-        else
+-           echo ${CURLLIBDIR}-lcurl @LDFLAGS@ @LIBS@
+-        fi
+-        ;;
+-
+-    --static-libs)
+-        echo @libdir@/libcurl.@libext@ @LDFLAGS@ @LIBCURL_LIBS@ @LIBS@
++        pkg-config libcurl --libs
+         ;;
+ 
+     --configure)
+-        echo @CONFIGURE_OPTIONS@
+-        ;;
++        pkg-config libcurl --variable=configure_options | sed 's/^"//;s/"$//'
++        ;;
+ 
+     *)
+         echo "unknown option: $1"
+--- a/libcurl.pc.in
++++ b/libcurl.pc.in
+@@ -29,6 +29,7 @@ libdir=@libdir@
+ includedir=@includedir@
+ supported_protocols="@SUPPORT_PROTOCOLS@"
+ supported_features="@SUPPORT_FEATURES@"
++configure_options=@CONFIGURE_OPTIONS@
+ 
+ Name: libcurl
+ URL: http://curl.haxx.se/
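The multilib patch above turns curl-config --libs into a thin wrapper around
pkg-config, so the two tools should now report identical link flags. A minimal
sketch of that sanity check, assuming a patched curl-config and pkg-config are
both on the PATH (this check is illustrative only, not part of the build
system):

import subprocess

def flags(cmd):
    # Run a command and return its stdout with surrounding whitespace removed.
    return subprocess.check_output(cmd).decode().strip()

curl_config = flags(["curl-config", "--libs"])
pkg_config = flags(["pkg-config", "libcurl", "--libs"])

print("curl-config --libs:        %s" % curl_config)
print("pkg-config libcurl --libs: %s" % pkg_config)
assert curl_config == pkg_config, "curl-config does not defer to pkg-config"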
diff --git a/pkgs/core/linux-atm/linux-atm.nm b/pkgs/core/linux-atm/linux-atm.nm
index c11e70c..d114a75 100644
--- a/pkgs/core/linux-atm/linux-atm.nm
+++ b/pkgs/core/linux-atm/linux-atm.nm
@@ -34,7 +34,7 @@ PKG_URL = http://linux-atm.sourceforge.net/
 PKG_LICENSE = BSD and GPLv2 and GPLv2+ and LGPLv2+ and MIT
 PKG_SUMMARY = Tools to support ATM networking under Linux.
 
-PKG_BUILD_DEPS+= flex kernel-headers perl
+PKG_BUILD_DEPS+= flex-devel kernel-headers perl
 
 define PKG_DESCRIPTION
 	This package contains header files and libraries for development \
diff --git a/pkgs/core/nss/nss.nm b/pkgs/core/nss/nss.nm
index d1a9e34..dd2ffd8 100644
--- a/pkgs/core/nss/nss.nm
+++ b/pkgs/core/nss/nss.nm
@@ -50,7 +50,7 @@ PKG_OBJECTS += nss-pem-20100809.tar.bz2
 
 PKG_PACKAGES += $(PKG_NAME_REAL)-devel $(PKG_NAME_REAL)-libs
 
-PKG_DEPS-$(PKG_NAME_REAL)-devel += nss nss-softokn-devel nss-util-devel
+PKG_DEPS-$(PKG_NAME_REAL)-devel += nspr-devel nss nss-softokn-devel nss-util-devel
 
 # Mozilla does no versioning :(
 define PKG_FILES-$(PKG_NAME_REAL)-devel
diff --git a/pkgs/core/python-pycurl/patches/python-pycurl-fix-do_curl_reset-refcount.patch b/pkgs/core/python-pycurl/patches/python-pycurl-fix-do_curl_reset-refcount.patch
new file mode 100644
index 0000000..7e20b15
--- /dev/null
+++ b/pkgs/core/python-pycurl/patches/python-pycurl-fix-do_curl_reset-refcount.patch
@@ -0,0 +1,24 @@
+--- a/src/pycurl.c
++++ a/src/pycurl.c
+@@ -1452,6 +1452,7 @@ do_curl_reset(CurlObject *self)
+         }
+     }
+ 
++    Py_INCREF(Py_None);
+     return Py_None;
+ }
+ 
+--- a/tests/test_internals.py
++++ a/tests/test_internals.py
+@@ -245,6 +245,11 @@ if 1 and gc:
+     if opts.verbose >= 1:
+         print "Tracked objects:", len(gc.get_objects())
+ 
++if 1:
++    # Ensure that the refcounting error in "reset" is fixed:
++    for i in xrange(100000):
++        c = Curl()
++        c.reset()
+ 
+ # /***********************************************************************
+ # // done
diff --git a/pkgs/core/python-pycurl/patches/python-pycurl-no-static-libs.patch0 b/pkgs/core/python-pycurl/patches/python-pycurl-no-static-libs.patch0
new file mode 100644
index 0000000..c1410dc
--- /dev/null
+++ b/pkgs/core/python-pycurl/patches/python-pycurl-no-static-libs.patch0
@@ -0,0 +1,12 @@
+--- setup.py~	2008-04-22 17:00:45.000000000 +0300
++++ setup.py	2008-07-03 21:53:36.000000000 +0300
+@@ -97,8 +97,7 @@
+         else:
+             extra_compile_args.append(e)
+     libs = split_quoted(
+-        os.popen("'%s' --libs" % CURL_CONFIG).read()+\
+-        os.popen("'%s' --static-libs" % CURL_CONFIG).read())
++        os.popen("'%s' --libs" % CURL_CONFIG).read())
+     for e in libs:
+         if e[:2] == "-l":
+             libraries.append(e[2:])
diff --git a/pkgs/core/python-pycurl/python-pycurl.nm b/pkgs/core/python-pycurl/python-pycurl.nm
new file mode 100644
index 0000000..27c9777
--- /dev/null
+++ b/pkgs/core/python-pycurl/python-pycurl.nm
@@ -0,0 +1,63 @@
+###############################################################################
+#                                                                             #
+# IPFire.org - A linux based firewall                                         #
+# Copyright (C) 2007, 2008 Michael Tremer & Christian Schmidt                 #
+#                                                                             #
+# This program is free software: you can redistribute it and/or modify       #
+# it under the terms of the GNU General Public License as published by       #
+# the Free Software Foundation, either version 3 of the License, or          #
+# (at your option) any later version.                                        #
+#                                                                             #
+# This program is distributed in the hope that it will be useful,            #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of             #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the               #
+# GNU General Public License for more details.                               #
+#                                                                             #
+# You should have received a copy of the GNU General Public License          #
+# along with this program. If not, see http://www.gnu.org/licenses/.         #
+#                                                                             #
+###############################################################################
+
+###############################################################################
+# Definitions
+###############################################################################
+
+include $(PKGROOT)/Include
+
+PKG_NAME = pycurl
+PKG_VER = 7.19.0
+PKG_REL = 1
+PKG_ARCH = noarch
+
+PKG_MAINTAINER =
+PKG_GROUP = Development/Languages
+PKG_URL = http://pycurl.sourceforge.net/
+PKG_LICENSE = LGPLv2+ or MIT
+PKG_SUMMARY = A Python interface to libcurl.
+
+CFLAGS += -DHAVE_CURL_OPENSSL
+
+PKG_BUILD_DEPS+= curl-devel openssl-devel python-devel
+
+define PKG_DESCRIPTION
+	PycURL is a Python interface to libcurl. PycURL can be used to fetch \
+	objects identified by a URL from a Python program, similar to the \
+	urllib Python module. PycURL is mature, very fast, and supports a lot \
+	of features.
+endef
+
+PKG_TARBALL = $(THISAPP).tar.gz
+
+define STAGE_BUILD
+	cd $(DIR_APP) && python setup.py build
+endef
+
+define STAGE_TEST
+	cd $(DIR_APP) && PYTHONPATH=$PWD/build/lib* python tests/test_internals.py -q
+endef
+
+define STAGE_INSTALL
+	cd $(DIR_APP) && python setup.py install --skip-build -O1 \
+		--root=$(BUILDROOT)
+	cd $(DIR_APP) && rm -rf $(BUILDROOT)/usr/share/doc/pycurl
+endef
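The refcount patch above adds the Py_INCREF(Py_None) that do_curl_reset() was
missing: a C function returning Py_None must own a reference to it, so without
the INCREF every reset() call drains one reference from the global None object
until the interpreter aborts. A rough sketch of how to observe this from
Python, assuming any pycurl build whose Curl objects support reset() (the
iteration count is arbitrary):

import sys
import pycurl

before = sys.getrefcount(None)
c = pycurl.Curl()
for _ in range(10000):
    c.reset()
after = sys.getrefcount(None)

# On a patched build the drift stays near zero; on an unpatched build it
# sinks towards -10000, one lost reference per reset() call.
print("refcount drift on None: %d" % (after - before))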
""" +@@ -208,7 +208,7 @@ class RangeableFileObject: + bufsize = offset - pos + buf = self.fo.read(bufsize) + if len(buf) != bufsize: +- raise RangeError('Requested Range Not Satisfiable') ++ raise RangeError(9, 'Requested Range Not Satisfiable') + pos+= bufsize + + class FileRangeHandler(urllib2.FileHandler): +@@ -238,7 +238,7 @@ class FileRangeHandler(urllib2.FileHandler): + (fb,lb) = brange + if lb == '': lb = size + if fb < 0 or fb > size or lb > size: +- raise RangeError('Requested Range Not Satisfiable') ++ raise RangeError(9, 'Requested Range Not Satisfiable') + size = (lb - fb) + fo = RangeableFileObject(fo, (fb,lb)) + headers = mimetools.Message(StringIO( +@@ -318,12 +318,12 @@ class FTPRangeHandler(urllib2.FTPHandler): + (fb,lb) = range_tup + if lb == '': + if retrlen is None or retrlen == 0: +- raise RangeError('Requested Range Not Satisfiable due to unobtainable file length.') ++ raise RangeError(9, 'Requested Range Not Satisfiable due to unobtainable file length.') + lb = retrlen + retrlen = lb - fb + if retrlen < 0: + # beginning of range is larger than file +- raise RangeError('Requested Range Not Satisfiable') ++ raise RangeError(9, 'Requested Range Not Satisfiable') + else: + retrlen = lb - fb + fp = RangeableFileObject(fp, (0,retrlen)) +@@ -458,6 +458,6 @@ def range_tuple_normalize(range_tup): + # check if range is over the entire file + if (fb,lb) == (0,''): return None + # check that the range is valid +- if lb < fb: raise RangeError('Invalid byte range: %s-%s' % (fb,lb)) ++ if lb < fb: raise RangeError(9, 'Invalid byte range: %s-%s' % (fb,lb)) + return (fb,lb) + +diff --git a/urlgrabber/grabber.py b/urlgrabber/grabber.py +index e090e90..b2770c5 100644 +--- a/urlgrabber/grabber.py ++++ b/urlgrabber/grabber.py +@@ -68,14 +68,14 @@ GENERAL ARGUMENTS (kwargs) + (which can be set on default_grabber.throttle) is used. See + BANDWIDTH THROTTLING for more information. + +- timeout = None ++ timeout = 300 + +- a positive float expressing the number of seconds to wait for socket +- operations. If the value is None or 0.0, socket operations will block +- forever. Setting this option causes urlgrabber to call the settimeout +- method on the Socket object used for the request. See the Python +- documentation on settimeout for more information. +- http://www.python.org/doc/current/lib/socket-objects.html ++ a positive integer expressing the number of seconds to wait before ++ timing out attempts to connect to a server. If the value is None ++ or 0, connection attempts will not time out. The timeout is passed ++ to the underlying pycurl object as its CONNECTTIMEOUT option, see ++ the curl documentation on CURLOPT_CONNECTTIMEOUT for more information. ++ http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUT + + bandwidth = 0 + +@@ -198,6 +198,12 @@ GENERAL ARGUMENTS (kwargs) + control, you should probably subclass URLParser and pass it in via + the 'urlparser' option. + ++ username = None ++ username to use for simple http auth - is automatically quoted for special characters ++ ++ password = None ++ password to use for simple http auth - is automatically quoted for special characters ++ + ssl_ca_cert = None + + this option can be used if M2Crypto is available and will be +@@ -248,6 +254,11 @@ GENERAL ARGUMENTS (kwargs) + + Maximum size (in bytes) of the headers. + ++ self.ip_resolve = 'whatever' ++ ++ What type of name to IP resolving to use, default is to do both IPV4 and ++ IPV6. 
++ + + RETRY RELATED ARGUMENTS + +@@ -420,6 +431,7 @@ import time + import string + import urllib + import urllib2 ++from httplib import responses + import mimetools + import thread + import types +@@ -439,6 +451,12 @@ try: + except: + __version__ = '???' + ++try: ++ # this part isn't going to do much - need to talk to gettext ++ from i18n import _ ++except ImportError, msg: ++ def _(st): return st ++ + ######################################################################## + # functions for debugging output. These functions are here because they + # are also part of the module initialization. +@@ -527,6 +545,22 @@ def _(st): + # END MODULE INITIALIZATION + ######################################################################## + ++######################################################################## ++# UTILITY FUNCTIONS ++######################################################################## ++ ++# These functions are meant to be utilities for the urlgrabber library to use. ++ ++def _to_utf8(obj, errors='replace'): ++ '''convert 'unicode' to an encoded utf-8 byte string ''' ++ # stolen from yum.i18n ++ if isinstance(obj, unicode): ++ obj = obj.encode('utf-8', errors) ++ return obj ++ ++######################################################################## ++# END UTILITY FUNCTIONS ++######################################################################## + + + class URLGrabError(IOError): +@@ -662,6 +696,7 @@ class URLParser: + opts.quote = 0 --> do not quote it + opts.quote = None --> guess + """ ++ url = _to_utf8(url) + quote = opts.quote + + if opts.prefix: +@@ -800,6 +835,7 @@ class URLGrabberOptions: + self.close_connection = 0 + self.range = None + self.user_agent = 'urlgrabber/%s' % __version__ ++ self.ip_resolve = None + self.keepalive = 1 + self.proxies = None + self.reget = None +@@ -808,13 +844,15 @@ class URLGrabberOptions: + self.prefix = None + self.opener = None + self.cache_openers = True +- self.timeout = None ++ self.timeout = 300 + self.text = None + self.http_headers = None + self.ftp_headers = None + self.data = None + self.urlparser = URLParser() + self.quote = None ++ self.username = None ++ self.password = None + self.ssl_ca_cert = None # sets SSL_CAINFO - path to certdb + self.ssl_context = None # no-op in pycurl + self.ssl_verify_peer = True # check peer's cert for authenticityb +@@ -846,7 +884,7 @@ class URLGrabberOptions: + s = s + indent + '}' + return s + +-class URLGrabber: ++class URLGrabber(object): + """Provides easy opening of URLs with a variety of options. + + All options are specified as kwargs. Options may be specified when +@@ -931,6 +969,9 @@ class URLGrabber: + (scheme, host, path, parm, query, frag) = parts + if filename is None: + filename = os.path.basename( urllib.unquote(path) ) ++ if not filename: ++ # This is better than nothing. ++ filename = 'index.html' + if scheme == 'file' and not opts.copy_local: + # just return the name of the local file - don't make a + # copy currently +@@ -1030,7 +1071,7 @@ class URLGrabber: + default_grabber = URLGrabber() + + +-class PyCurlFileObject(): ++class PyCurlFileObject(object): + def __init__(self, url, filename, opts): + self.fo = None + self._hdr_dump = '' +@@ -1052,9 +1093,15 @@ class PyCurlFileObject(): + self._reget_length = 0 + self._prog_running = False + self._error = (None, None) +- self.size = None ++ self.size = 0 ++ self._hdr_ended = False + self._do_open() + ++ ++ def geturl(self): ++ """ Provide the geturl() method, used to be got from ++ urllib.addinfourl, via. 
urllib.URLopener.* """ ++ return self.url + + def __getattr__(self, name): + """This effectively allows us to wrap at the instance level. +@@ -1085,9 +1132,14 @@ class PyCurlFileObject(): + return -1 + + def _hdr_retrieve(self, buf): ++ if self._hdr_ended: ++ self._hdr_dump = '' ++ self.size = 0 ++ self._hdr_ended = False ++ + if self._over_max_size(cur=len(self._hdr_dump), + max_size=self.opts.max_header_size): +- return -1 ++ return -1 + try: + self._hdr_dump += buf + # we have to get the size before we do the progress obj start +@@ -1104,7 +1156,17 @@ class PyCurlFileObject(): + s = parse150(buf) + if s: + self.size = int(s) +- ++ ++ if buf.lower().find('location') != -1: ++ location = ':'.join(buf.split(':')[1:]) ++ location = location.strip() ++ self.scheme = urlparse.urlsplit(location)[0] ++ self.url = location ++ ++ if len(self._hdr_dump) != 0 and buf == '\r\n': ++ self._hdr_ended = True ++ if DEBUG: DEBUG.info('header ended:') ++ + return len(buf) + except KeyboardInterrupt: + return pycurl.READFUNC_ABORT +@@ -1113,8 +1175,10 @@ class PyCurlFileObject(): + if self._parsed_hdr: + return self._parsed_hdr + statusend = self._hdr_dump.find('\n') ++ statusend += 1 # ridiculous as it may seem. + hdrfp = StringIO() + hdrfp.write(self._hdr_dump[statusend:]) ++ hdrfp.seek(0) + self._parsed_hdr = mimetools.Message(hdrfp) + return self._parsed_hdr + +@@ -1136,11 +1200,21 @@ class PyCurlFileObject(): + self.curl_obj.setopt(pycurl.PROGRESSFUNCTION, self._progress_update) + self.curl_obj.setopt(pycurl.FAILONERROR, True) + self.curl_obj.setopt(pycurl.OPT_FILETIME, True) ++ self.curl_obj.setopt(pycurl.FOLLOWLOCATION, True) + + if DEBUG: + self.curl_obj.setopt(pycurl.VERBOSE, True) + if opts.user_agent: + self.curl_obj.setopt(pycurl.USERAGENT, opts.user_agent) ++ if opts.ip_resolve: ++ # Default is: IPRESOLVE_WHATEVER ++ ipr = opts.ip_resolve.lower() ++ if ipr == 'whatever': # Do we need this? 
++ self.curl_obj.setopt(pycurl.IPRESOLVE,pycurl.IPRESOLVE_WHATEVER) ++ if ipr == 'ipv4': ++ self.curl_obj.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4) ++ if ipr == 'ipv6': ++ self.curl_obj.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V6) + + # maybe to be options later + self.curl_obj.setopt(pycurl.FOLLOWLOCATION, True) +@@ -1148,9 +1222,11 @@ class PyCurlFileObject(): + + # timeouts + timeout = 300 +- if opts.timeout: +- timeout = int(opts.timeout) +- self.curl_obj.setopt(pycurl.CONNECTTIMEOUT, timeout) ++ if hasattr(opts, 'timeout'): ++ timeout = int(opts.timeout or 0) ++ self.curl_obj.setopt(pycurl.CONNECTTIMEOUT, timeout) ++ self.curl_obj.setopt(pycurl.LOW_SPEED_LIMIT, 1) ++ self.curl_obj.setopt(pycurl.LOW_SPEED_TIME, timeout) + + # ssl options + if self.scheme == 'https': +@@ -1203,12 +1279,19 @@ class PyCurlFileObject(): + if proxy == '_none_': proxy = "" + self.curl_obj.setopt(pycurl.PROXY, proxy) + +- # FIXME username/password/auth settings ++ if opts.username and opts.password: ++ if self.scheme in ('http', 'https'): ++ self.curl_obj.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_ANY) ++ ++ if opts.username and opts.password: ++ # apparently when applying them as curlopts they do not require quoting of any kind ++ userpwd = '%s:%s' % (opts.username, opts.password) ++ self.curl_obj.setopt(pycurl.USERPWD, userpwd) + + #posts - simple - expects the fields as they are + if opts.data: + self.curl_obj.setopt(pycurl.POST, True) +- self.curl_obj.setopt(pycurl.POSTFIELDS, self._to_utf8(opts.data)) ++ self.curl_obj.setopt(pycurl.POSTFIELDS, _to_utf8(opts.data)) + + # our url + self.curl_obj.setopt(pycurl.URL, self.url) +@@ -1228,12 +1311,14 @@ class PyCurlFileObject(): + + code = self.http_code + errcode = e.args[0] ++ errurl = urllib.unquote(self.url) ++ + if self._error[0]: + errcode = self._error[0] + + if errcode == 23 and code >= 200 and code < 299: +- err = URLGrabError(15, _('User (or something) called abort %s: %s') % (self.url, e)) +- err.url = self.url ++ err = URLGrabError(15, _('User (or something) called abort %s: %s') % (errurl, e)) ++ err.url = errurl + + # this is probably wrong but ultimately this is what happens + # we have a legit http code and a pycurl 'writer failed' code +@@ -1244,23 +1329,23 @@ class PyCurlFileObject(): + raise KeyboardInterrupt + + elif errcode == 28: +- err = URLGrabError(12, _('Timeout on %s: %s') % (self.url, e)) +- err.url = self.url ++ err = URLGrabError(12, _('Timeout on %s: %s') % (errurl, e)) ++ err.url = errurl + raise err + elif errcode == 35: + msg = _("problem making ssl connection") + err = URLGrabError(14, msg) +- err.url = self.url ++ err.url = errurl + raise err + elif errcode == 37: +- msg = _("Could not open/read %s") % (self.url) ++ msg = _("Could not open/read %s") % (errurl) + err = URLGrabError(14, msg) +- err.url = self.url ++ err.url = errurl + raise err + + elif errcode == 42: +- err = URLGrabError(15, _('User (or something) called abort %s: %s') % (self.url, e)) +- err.url = self.url ++ err = URLGrabError(15, _('User (or something) called abort %s: %s') % (errurl, e)) ++ err.url = errurl + # this is probably wrong but ultimately this is what happens + # we have a legit http code and a pycurl 'writer failed' code + # which almost always means something aborted it from outside +@@ -1272,33 +1357,93 @@ class PyCurlFileObject(): + elif errcode == 58: + msg = _("problem with the local client certificate") + err = URLGrabError(14, msg) +- err.url = self.url ++ err.url = errurl + raise err + + elif errcode == 60: +- msg = _("client cert 
cannot be verified or client cert incorrect") ++ msg = _("Peer cert cannot be verified or peer cert invalid") + err = URLGrabError(14, msg) +- err.url = self.url ++ err.url = errurl + raise err + + elif errcode == 63: + if self._error[1]: + msg = self._error[1] + else: +- msg = _("Max download size exceeded on %s") % (self.url) ++ msg = _("Max download size exceeded on %s") % () + err = URLGrabError(14, msg) +- err.url = self.url ++ err.url = errurl + raise err + + elif str(e.args[1]) == '' and self.http_code != 0: # fake it until you make it +- msg = 'HTTP Error %s : %s ' % (self.http_code, self.url) ++ if self.scheme in ['http', 'https']: ++ if self.http_code in responses: ++ resp = responses[self.http_code] ++ msg = 'HTTP Error %s - %s : %s' % (self.http_code, resp, errurl) ++ else: ++ msg = 'HTTP Error %s : %s ' % (self.http_code, errurl) ++ elif self.scheme in ['ftp']: ++ msg = 'FTP Error %s : %s ' % (self.http_code, errurl) ++ else: ++ msg = "Unknown Error: URL=%s , scheme=%s" % (errurl, self.scheme) + else: +- msg = 'PYCURL ERROR %s - "%s"' % (errcode, str(e.args[1])) ++ pyerr2str = { 5 : _("Couldn't resolve proxy"), ++ 6 : _("Couldn't resolve host"), ++ 7 : _("Couldn't connect"), ++ 8 : _("Bad reply to FTP server"), ++ 9 : _("Access denied"), ++ 11 : _("Bad reply to FTP pass"), ++ 13 : _("Bad reply to FTP pasv"), ++ 14 : _("Bad reply to FTP 227"), ++ 15 : _("Couldn't get FTP host"), ++ 17 : _("Couldn't set FTP type"), ++ 18 : _("Partial file"), ++ 19 : _("FTP RETR command failed"), ++ 22 : _("HTTP returned error"), ++ 23 : _("Write error"), ++ 25 : _("Upload failed"), ++ 26 : _("Read error"), ++ 27 : _("Out of Memory"), ++ 28 : _("Operation timed out"), ++ 30 : _("FTP PORT command failed"), ++ 31 : _("FTP REST command failed"), ++ 33 : _("Range failed"), ++ 34 : _("HTTP POST failed"), ++ 35 : _("SSL CONNECT failed"), ++ 36 : _("Couldn't resume download"), ++ 37 : _("Couldn't read file"), ++ 42 : _("Aborted by callback"), ++ 47 : _("Too many redirects"), ++ 51 : _("Peer certificate failed verification"), ++ 53 : _("SSL engine not found"), ++ 54 : _("SSL engine set failed"), ++ 55 : _("Network error send()"), ++ 56 : _("Network error recv()"), ++ 58 : _("Local certificate failed"), ++ 59 : _("SSL set cipher failed"), ++ 60 : _("Local CA certificate failed"), ++ 61 : _("HTTP bad transfer encoding"), ++ 63 : _("Maximum file size exceeded"), ++ 64 : _("FTP SSL failed"), ++ 67 : _("Authentication failure"), ++ 70 : _("Out of disk space on server"), ++ 73 : _("Remove file exists"), ++ } ++ errstr = str(e.args[1]) ++ if not errstr: ++ errstr = pyerr2str.get(errcode, '<Unknown>') ++ msg = 'curl#%s - "%s"' % (errcode, errstr) + code = errcode + err = URLGrabError(14, msg) + err.code = code + err.exception = e + raise err ++ else: ++ if self._error[1]: ++ msg = self._error[1] ++ err = URLGRabError(14, msg) ++ err.url = urllib.unquote(self.url) ++ raise err + + def _do_open(self): + self.curl_obj = _curl_cache +@@ -1333,7 +1478,11 @@ class PyCurlFileObject(): + + if self.opts.range: + rt = self.opts.range +- if rt[0]: rt = (rt[0] + reget_length, rt[1]) ++ ++ if rt[0] is None: ++ rt = (0, rt[1]) ++ rt = (rt[0] + reget_length, rt[1]) ++ + + if rt: + header = range_tuple_to_header(rt) +@@ -1434,9 +1583,13 @@ class PyCurlFileObject(): + #fh, self._temp_name = mkstemp() + #self.fo = open(self._temp_name, 'wb') + +- +- self._do_perform() +- ++ try: ++ self._do_perform() ++ except URLGrabError, e: ++ self.fo.flush() ++ self.fo.close() ++ raise e ++ + + + if _was_filename: +@@ -1446,9 +1599,23 @@ 
class PyCurlFileObject(): + # set the time + mod_time = self.curl_obj.getinfo(pycurl.INFO_FILETIME) + if mod_time != -1: +- os.utime(self.filename, (mod_time, mod_time)) ++ try: ++ os.utime(self.filename, (mod_time, mod_time)) ++ except OSError, e: ++ err = URLGrabError(16, _(\ ++ 'error setting timestamp on file %s from %s, OSError: %s') ++ % (self.filename, self.url, e)) ++ err.url = self.url ++ raise err + # re open it +- self.fo = open(self.filename, 'r') ++ try: ++ self.fo = open(self.filename, 'r') ++ except IOError, e: ++ err = URLGrabError(16, _(\ ++ 'error opening file from %s, IOError: %s') % (self.url, e)) ++ err.url = self.url ++ raise err ++ + else: + #self.fo = open(self._temp_name, 'r') + self.fo.seek(0) +@@ -1532,11 +1699,14 @@ class PyCurlFileObject(): + def _over_max_size(self, cur, max_size=None): + + if not max_size: +- max_size = self.size +- if self.opts.size: # if we set an opts size use that, no matter what +- max_size = self.opts.size ++ if not self.opts.size: ++ max_size = self.size ++ else: ++ max_size = self.opts.size ++ + if not max_size: return False # if we have None for all of the Max then this is dumb +- if cur > max_size + max_size*.10: ++ ++ if cur > int(float(max_size) * 1.10): + + msg = _("Downloaded more than max size for %s: %s > %s") \ + % (self.url, cur, max_size) +@@ -1544,13 +1714,6 @@ class PyCurlFileObject(): + return True + return False + +- def _to_utf8(self, obj, errors='replace'): +- '''convert 'unicode' to an encoded utf-8 byte string ''' +- # stolen from yum.i18n +- if isinstance(obj, unicode): +- obj = obj.encode('utf-8', errors) +- return obj +- + def read(self, amt=None): + self._fill_buffer(amt) + if amt is None: +@@ -1582,9 +1745,21 @@ class PyCurlFileObject(): + self.opts.progress_obj.end(self._amount_read) + self.fo.close() + +- ++ def geturl(self): ++ """ Provide the geturl() method, used to be got from ++ urllib.addinfourl, via. 
urllib.URLopener.* """ ++ return self.url ++ + _curl_cache = pycurl.Curl() # make one and reuse it over and over and over + ++def reset_curl_obj(): ++ """To make sure curl has reread the network/dns info we force a reload""" ++ global _curl_cache ++ _curl_cache.close() ++ _curl_cache = pycurl.Curl() ++ ++ ++ + + ##################################################################### + # DEPRECATED FUNCTIONS +diff --git a/urlgrabber/mirror.py b/urlgrabber/mirror.py +index dad410b..8731aed 100644 +--- a/urlgrabber/mirror.py ++++ b/urlgrabber/mirror.py +@@ -90,7 +90,7 @@ CUSTOMIZATION + import random + import thread # needed for locking to make this threadsafe + +-from grabber import URLGrabError, CallbackObject, DEBUG ++from grabber import URLGrabError, CallbackObject, DEBUG, _to_utf8 + + def _(st): + return st +@@ -263,7 +263,8 @@ class MirrorGroup: + def _parse_mirrors(self, mirrors): + parsed_mirrors = [] + for m in mirrors: +- if type(m) == type(''): m = {'mirror': m} ++ if isinstance(m, basestring): ++ m = {'mirror': _to_utf8(m)} + parsed_mirrors.append(m) + return parsed_mirrors + +diff --git a/urlgrabber/progress.py b/urlgrabber/progress.py +index dd07c6a..45eb248 100644 +--- a/urlgrabber/progress.py ++++ b/urlgrabber/progress.py +@@ -658,6 +658,8 @@ def format_time(seconds, use_hours=0): + if seconds is None or seconds < 0: + if use_hours: return '--:--:--' + else: return '--:--' ++ elif seconds == float('inf'): ++ return 'Infinite' + else: + seconds = int(seconds) + minutes = seconds / 60 diff --git a/pkgs/core/python-urlgrabber/python-urlgrabber.nm b/pkgs/core/python-urlgrabber/python-urlgrabber.nm index 358de7f..b5beca8 100644 --- a/pkgs/core/python-urlgrabber/python-urlgrabber.nm +++ b/pkgs/core/python-urlgrabber/python-urlgrabber.nm @@ -25,7 +25,7 @@ include $(PKGROOT)/Include
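The urlgrabber-HEAD.patch above changes how several grabber options behave:
timeout is now an integer handed to pycurl as CONNECTTIMEOUT (with a matching
low-speed cutoff), ip_resolve selects the address family, and username and
password feed CURLOPT_USERPWD. A minimal sketch of the new options, assuming
the patched python-urlgrabber is installed (the URL and credentials are
placeholders):

from urlgrabber.grabber import URLGrabber

g = URLGrabber(timeout=30,         # seconds before a connect attempt fails
               ip_resolve='ipv4',  # 'ipv4', 'ipv6' or 'whatever'
               username='user',
               password='secret')

# urlgrab() downloads the URL and returns the local file name.
filename = g.urlgrab('http://example.org/some/file.txt')
print("saved to %s" % filename)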
diff --git a/pkgs/core/python-urlgrabber/python-urlgrabber.nm b/pkgs/core/python-urlgrabber/python-urlgrabber.nm
index 358de7f..b5beca8 100644
--- a/pkgs/core/python-urlgrabber/python-urlgrabber.nm
+++ b/pkgs/core/python-urlgrabber/python-urlgrabber.nm
@@ -25,7 +25,7 @@ include $(PKGROOT)/Include
 
 PKG_NAME = urlgrabber
-PKG_VER = 3.1.0
+PKG_VER = 3.9.1
 PKG_REL = 1
 PKG_ARCH = noarch
 
@@ -35,7 +35,7 @@ PKG_URL = http://urlgrabber.baseurl.org/
 PKG_LICENSE = LGPLv2+
 PKG_SUMMARY = A high-level cross-protocol url-grabber.
 
-PKG_BUILD_DEPS+= python-devel
+PKG_BUILD_DEPS+= python-devel python-pycurl
 
 define PKG_DESCRIPTION
 	A high-level cross-protocol url-grabber for python supporting HTTP, \
diff --git a/pkgs/core/qt/qt.nm b/pkgs/core/qt/qt.nm
index 287121c..94ffa07 100644
--- a/pkgs/core/qt/qt.nm
+++ b/pkgs/core/qt/qt.nm
@@ -42,11 +42,10 @@ PKG_FILES-$(PKG_NAME_REAL)-devel += \
 	/usr/lib/qt4/*.prl \
 	/usr/lib/qt4/mkspecs
 
-PKG_BUILD_DEPS+= fontconfig-devel gcc-c++ libjpeg-devel libmng-devel \
-	libpng-devel libtiff-devel libX11-devel libXext-devel libXfixes-devel \
-	libXrandr-devel libXrender-devel libXi-devel pcre-devel pkg-config \
-	xorg-x11-proto-devel zlib-devel
-
+PKG_BUILD_DEPS+= fontconfig-devel freetype-devel gcc-c++ libjpeg-devel \
+	libmng-devel libpng-devel libtiff-devel libX11-devel \
+	libXext-devel libXfixes-devel libXrandr-devel libXrender-devel \
+	libXi-devel pcre-devel pkg-config xorg-x11-proto-devel zlib-devel
 
 define PKG_DESCRIPTION
 	Qt is a software toolkit for developing applications.
hooks/post-receive
-- 
IPFire 3.x development tree