##// END OF EJS Templates
py3: conditionalize the urlparse import...
Pulkit Goyal -
r29431:80880ad3 default
parent child Browse files
Show More
@@ -1,927 +1,928 b''
1 # bugzilla.py - bugzilla integration for mercurial
1 # bugzilla.py - bugzilla integration for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2011-4 Jim Hague <jim.hague@acm.org>
4 # Copyright 2011-4 Jim Hague <jim.hague@acm.org>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''hooks for integrating with the Bugzilla bug tracker
9 '''hooks for integrating with the Bugzilla bug tracker
10
10
11 This hook extension adds comments on bugs in Bugzilla when changesets
11 This hook extension adds comments on bugs in Bugzilla when changesets
12 that refer to bugs by Bugzilla ID are seen. The comment is formatted using
12 that refer to bugs by Bugzilla ID are seen. The comment is formatted using
13 the Mercurial template mechanism.
13 the Mercurial template mechanism.
14
14
15 The bug references can optionally include an update for Bugzilla of the
15 The bug references can optionally include an update for Bugzilla of the
16 hours spent working on the bug. Bugs can also be marked fixed.
16 hours spent working on the bug. Bugs can also be marked fixed.
17
17
18 Three basic modes of access to Bugzilla are provided:
18 Three basic modes of access to Bugzilla are provided:
19
19
20 1. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later.
20 1. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later.
21
21
22 2. Check data via the Bugzilla XMLRPC interface and submit bug change
22 2. Check data via the Bugzilla XMLRPC interface and submit bug change
23 via email to Bugzilla email interface. Requires Bugzilla 3.4 or later.
23 via email to Bugzilla email interface. Requires Bugzilla 3.4 or later.
24
24
25 3. Writing directly to the Bugzilla database. Only Bugzilla installations
25 3. Writing directly to the Bugzilla database. Only Bugzilla installations
26 using MySQL are supported. Requires Python MySQLdb.
26 using MySQL are supported. Requires Python MySQLdb.
27
27
28 Writing directly to the database is susceptible to schema changes, and
28 Writing directly to the database is susceptible to schema changes, and
29 relies on a Bugzilla contrib script to send out bug change
29 relies on a Bugzilla contrib script to send out bug change
30 notification emails. This script runs as the user running Mercurial,
30 notification emails. This script runs as the user running Mercurial,
31 must be run on the host with the Bugzilla install, and requires
31 must be run on the host with the Bugzilla install, and requires
32 permission to read Bugzilla configuration details and the necessary
32 permission to read Bugzilla configuration details and the necessary
33 MySQL user and password to have full access rights to the Bugzilla
33 MySQL user and password to have full access rights to the Bugzilla
34 database. For these reasons this access mode is now considered
34 database. For these reasons this access mode is now considered
35 deprecated, and will not be updated for new Bugzilla versions going
35 deprecated, and will not be updated for new Bugzilla versions going
36 forward. Only adding comments is supported in this access mode.
36 forward. Only adding comments is supported in this access mode.
37
37
38 Access via XMLRPC needs a Bugzilla username and password to be specified
38 Access via XMLRPC needs a Bugzilla username and password to be specified
39 in the configuration. Comments are added under that username. Since the
39 in the configuration. Comments are added under that username. Since the
40 configuration must be readable by all Mercurial users, it is recommended
40 configuration must be readable by all Mercurial users, it is recommended
41 that the rights of that user are restricted in Bugzilla to the minimum
41 that the rights of that user are restricted in Bugzilla to the minimum
42 necessary to add comments. Marking bugs fixed requires Bugzilla 4.0 and later.
42 necessary to add comments. Marking bugs fixed requires Bugzilla 4.0 and later.
43
43
44 Access via XMLRPC/email uses XMLRPC to query Bugzilla, but sends
44 Access via XMLRPC/email uses XMLRPC to query Bugzilla, but sends
45 email to the Bugzilla email interface to submit comments to bugs.
45 email to the Bugzilla email interface to submit comments to bugs.
46 The From: address in the email is set to the email address of the Mercurial
46 The From: address in the email is set to the email address of the Mercurial
47 user, so the comment appears to come from the Mercurial user. In the event
47 user, so the comment appears to come from the Mercurial user. In the event
48 that the Mercurial user email is not recognized by Bugzilla as a Bugzilla
48 that the Mercurial user email is not recognized by Bugzilla as a Bugzilla
49 user, the email associated with the Bugzilla username used to log into
49 user, the email associated with the Bugzilla username used to log into
50 Bugzilla is used instead as the source of the comment. Marking bugs fixed
50 Bugzilla is used instead as the source of the comment. Marking bugs fixed
51 works on all supported Bugzilla versions.
51 works on all supported Bugzilla versions.
52
52
53 Configuration items common to all access modes:
53 Configuration items common to all access modes:
54
54
55 bugzilla.version
55 bugzilla.version
56 The access type to use. Values recognized are:
56 The access type to use. Values recognized are:
57
57
58 :``xmlrpc``: Bugzilla XMLRPC interface.
58 :``xmlrpc``: Bugzilla XMLRPC interface.
59 :``xmlrpc+email``: Bugzilla XMLRPC and email interfaces.
59 :``xmlrpc+email``: Bugzilla XMLRPC and email interfaces.
60 :``3.0``: MySQL access, Bugzilla 3.0 and later.
60 :``3.0``: MySQL access, Bugzilla 3.0 and later.
61 :``2.18``: MySQL access, Bugzilla 2.18 and up to but not
61 :``2.18``: MySQL access, Bugzilla 2.18 and up to but not
62 including 3.0.
62 including 3.0.
63 :``2.16``: MySQL access, Bugzilla 2.16 and up to but not
63 :``2.16``: MySQL access, Bugzilla 2.16 and up to but not
64 including 2.18.
64 including 2.18.
65
65
66 bugzilla.regexp
66 bugzilla.regexp
67 Regular expression to match bug IDs for update in changeset commit message.
67 Regular expression to match bug IDs for update in changeset commit message.
68 It must contain one "()" named group ``<ids>`` containing the bug
68 It must contain one "()" named group ``<ids>`` containing the bug
69 IDs separated by non-digit characters. It may also contain
69 IDs separated by non-digit characters. It may also contain
70 a named group ``<hours>`` with a floating-point number giving the
70 a named group ``<hours>`` with a floating-point number giving the
71 hours worked on the bug. If no named groups are present, the first
71 hours worked on the bug. If no named groups are present, the first
72 "()" group is assumed to contain the bug IDs, and work time is not
72 "()" group is assumed to contain the bug IDs, and work time is not
73 updated. The default expression matches ``Bug 1234``, ``Bug no. 1234``,
73 updated. The default expression matches ``Bug 1234``, ``Bug no. 1234``,
74 ``Bug number 1234``, ``Bugs 1234,5678``, ``Bug 1234 and 5678`` and
74 ``Bug number 1234``, ``Bugs 1234,5678``, ``Bug 1234 and 5678`` and
75 variations thereof, followed by an hours number prefixed by ``h`` or
75 variations thereof, followed by an hours number prefixed by ``h`` or
76 ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
76 ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
77
77
78 bugzilla.fixregexp
78 bugzilla.fixregexp
79 Regular expression to match bug IDs for marking fixed in changeset
79 Regular expression to match bug IDs for marking fixed in changeset
80 commit message. This must contain a "()" named group ``<ids>`` containing
80 commit message. This must contain a "()" named group ``<ids>`` containing
81 the bug IDs separated by non-digit characters. It may also contain
81 the bug IDs separated by non-digit characters. It may also contain
82 a named group ``<hours>`` with a floating-point number giving the
82 a named group ``<hours>`` with a floating-point number giving the
83 hours worked on the bug. If no named groups are present, the first
83 hours worked on the bug. If no named groups are present, the first
84 "()" group is assumed to contain the bug IDs, and work time is not
84 "()" group is assumed to contain the bug IDs, and work time is not
85 updated. The default expression matches ``Fixes 1234``, ``Fixes bug 1234``,
85 updated. The default expression matches ``Fixes 1234``, ``Fixes bug 1234``,
86 ``Fixes bugs 1234,5678``, ``Fixes 1234 and 5678`` and
86 ``Fixes bugs 1234,5678``, ``Fixes 1234 and 5678`` and
87 variations thereof, followed by an hours number prefixed by ``h`` or
87 variations thereof, followed by an hours number prefixed by ``h`` or
88 ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
88 ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
89
89
90 bugzilla.fixstatus
90 bugzilla.fixstatus
91 The status to set a bug to when marking fixed. Default ``RESOLVED``.
91 The status to set a bug to when marking fixed. Default ``RESOLVED``.
92
92
93 bugzilla.fixresolution
93 bugzilla.fixresolution
94 The resolution to set a bug to when marking fixed. Default ``FIXED``.
94 The resolution to set a bug to when marking fixed. Default ``FIXED``.
95
95
96 bugzilla.style
96 bugzilla.style
97 The style file to use when formatting comments.
97 The style file to use when formatting comments.
98
98
99 bugzilla.template
99 bugzilla.template
100 Template to use when formatting comments. Overrides style if
100 Template to use when formatting comments. Overrides style if
101 specified. In addition to the usual Mercurial keywords, the
101 specified. In addition to the usual Mercurial keywords, the
102 extension specifies:
102 extension specifies:
103
103
104 :``{bug}``: The Bugzilla bug ID.
104 :``{bug}``: The Bugzilla bug ID.
105 :``{root}``: The full pathname of the Mercurial repository.
105 :``{root}``: The full pathname of the Mercurial repository.
106 :``{webroot}``: Stripped pathname of the Mercurial repository.
106 :``{webroot}``: Stripped pathname of the Mercurial repository.
107 :``{hgweb}``: Base URL for browsing Mercurial repositories.
107 :``{hgweb}``: Base URL for browsing Mercurial repositories.
108
108
109 Default ``changeset {node|short} in repo {root} refers to bug
109 Default ``changeset {node|short} in repo {root} refers to bug
110 {bug}.\\ndetails:\\n\\t{desc|tabindent}``
110 {bug}.\\ndetails:\\n\\t{desc|tabindent}``
111
111
112 bugzilla.strip
112 bugzilla.strip
113 The number of path separator characters to strip from the front of
113 The number of path separator characters to strip from the front of
114 the Mercurial repository path (``{root}`` in templates) to produce
114 the Mercurial repository path (``{root}`` in templates) to produce
115 ``{webroot}``. For example, a repository with ``{root}``
115 ``{webroot}``. For example, a repository with ``{root}``
116 ``/var/local/my-project`` with a strip of 2 gives a value for
116 ``/var/local/my-project`` with a strip of 2 gives a value for
117 ``{webroot}`` of ``my-project``. Default 0.
117 ``{webroot}`` of ``my-project``. Default 0.
118
118
119 web.baseurl
119 web.baseurl
120 Base URL for browsing Mercurial repositories. Referenced from
120 Base URL for browsing Mercurial repositories. Referenced from
121 templates as ``{hgweb}``.
121 templates as ``{hgweb}``.
122
122
123 Configuration items common to XMLRPC+email and MySQL access modes:
123 Configuration items common to XMLRPC+email and MySQL access modes:
124
124
125 bugzilla.usermap
125 bugzilla.usermap
126 Path of file containing Mercurial committer email to Bugzilla user email
126 Path of file containing Mercurial committer email to Bugzilla user email
127 mappings. If specified, the file should contain one mapping per
127 mappings. If specified, the file should contain one mapping per
128 line::
128 line::
129
129
130 committer = Bugzilla user
130 committer = Bugzilla user
131
131
132 See also the ``[usermap]`` section.
132 See also the ``[usermap]`` section.
133
133
134 The ``[usermap]`` section is used to specify mappings of Mercurial
134 The ``[usermap]`` section is used to specify mappings of Mercurial
135 committer email to Bugzilla user email. See also ``bugzilla.usermap``.
135 committer email to Bugzilla user email. See also ``bugzilla.usermap``.
136 Contains entries of the form ``committer = Bugzilla user``.
136 Contains entries of the form ``committer = Bugzilla user``.
137
137
138 XMLRPC access mode configuration:
138 XMLRPC access mode configuration:
139
139
140 bugzilla.bzurl
140 bugzilla.bzurl
141 The base URL for the Bugzilla installation.
141 The base URL for the Bugzilla installation.
142 Default ``http://localhost/bugzilla``.
142 Default ``http://localhost/bugzilla``.
143
143
144 bugzilla.user
144 bugzilla.user
145 The username to use to log into Bugzilla via XMLRPC. Default
145 The username to use to log into Bugzilla via XMLRPC. Default
146 ``bugs``.
146 ``bugs``.
147
147
148 bugzilla.password
148 bugzilla.password
149 The password for Bugzilla login.
149 The password for Bugzilla login.
150
150
151 XMLRPC+email access mode uses the XMLRPC access mode configuration items,
151 XMLRPC+email access mode uses the XMLRPC access mode configuration items,
152 and also:
152 and also:
153
153
154 bugzilla.bzemail
154 bugzilla.bzemail
155 The Bugzilla email address.
155 The Bugzilla email address.
156
156
157 In addition, the Mercurial email settings must be configured. See the
157 In addition, the Mercurial email settings must be configured. See the
158 documentation in hgrc(5), sections ``[email]`` and ``[smtp]``.
158 documentation in hgrc(5), sections ``[email]`` and ``[smtp]``.
159
159
160 MySQL access mode configuration:
160 MySQL access mode configuration:
161
161
162 bugzilla.host
162 bugzilla.host
163 Hostname of the MySQL server holding the Bugzilla database.
163 Hostname of the MySQL server holding the Bugzilla database.
164 Default ``localhost``.
164 Default ``localhost``.
165
165
166 bugzilla.db
166 bugzilla.db
167 Name of the Bugzilla database in MySQL. Default ``bugs``.
167 Name of the Bugzilla database in MySQL. Default ``bugs``.
168
168
169 bugzilla.user
169 bugzilla.user
170 Username to use to access MySQL server. Default ``bugs``.
170 Username to use to access MySQL server. Default ``bugs``.
171
171
172 bugzilla.password
172 bugzilla.password
173 Password to use to access MySQL server.
173 Password to use to access MySQL server.
174
174
175 bugzilla.timeout
175 bugzilla.timeout
176 Database connection timeout (seconds). Default 5.
176 Database connection timeout (seconds). Default 5.
177
177
178 bugzilla.bzuser
178 bugzilla.bzuser
179 Fallback Bugzilla user name to record comments with, if changeset
179 Fallback Bugzilla user name to record comments with, if changeset
180 committer cannot be found as a Bugzilla user.
180 committer cannot be found as a Bugzilla user.
181
181
182 bugzilla.bzdir
182 bugzilla.bzdir
183 Bugzilla install directory. Used by default notify. Default
183 Bugzilla install directory. Used by default notify. Default
184 ``/var/www/html/bugzilla``.
184 ``/var/www/html/bugzilla``.
185
185
186 bugzilla.notify
186 bugzilla.notify
187 The command to run to get Bugzilla to send bug change notification
187 The command to run to get Bugzilla to send bug change notification
188 emails. Substitutes from a map with 3 keys, ``bzdir``, ``id`` (bug
188 emails. Substitutes from a map with 3 keys, ``bzdir``, ``id`` (bug
189 id) and ``user`` (committer bugzilla email). Default depends on
189 id) and ``user`` (committer bugzilla email). Default depends on
190 version; from 2.18 it is "cd %(bzdir)s && perl -T
190 version; from 2.18 it is "cd %(bzdir)s && perl -T
191 contrib/sendbugmail.pl %(id)s %(user)s".
191 contrib/sendbugmail.pl %(id)s %(user)s".
192
192
193 Activating the extension::
193 Activating the extension::
194
194
195 [extensions]
195 [extensions]
196 bugzilla =
196 bugzilla =
197
197
198 [hooks]
198 [hooks]
199 # run bugzilla hook on every change pulled or pushed in here
199 # run bugzilla hook on every change pulled or pushed in here
200 incoming.bugzilla = python:hgext.bugzilla.hook
200 incoming.bugzilla = python:hgext.bugzilla.hook
201
201
202 Example configurations:
202 Example configurations:
203
203
204 XMLRPC example configuration. This uses the Bugzilla at
204 XMLRPC example configuration. This uses the Bugzilla at
205 ``http://my-project.org/bugzilla``, logging in as user
205 ``http://my-project.org/bugzilla``, logging in as user
206 ``bugmail@my-project.org`` with password ``plugh``. It is used with a
206 ``bugmail@my-project.org`` with password ``plugh``. It is used with a
207 collection of Mercurial repositories in ``/var/local/hg/repos/``,
207 collection of Mercurial repositories in ``/var/local/hg/repos/``,
208 with a web interface at ``http://my-project.org/hg``. ::
208 with a web interface at ``http://my-project.org/hg``. ::
209
209
210 [bugzilla]
210 [bugzilla]
211 bzurl=http://my-project.org/bugzilla
211 bzurl=http://my-project.org/bugzilla
212 user=bugmail@my-project.org
212 user=bugmail@my-project.org
213 password=plugh
213 password=plugh
214 version=xmlrpc
214 version=xmlrpc
215 template=Changeset {node|short} in {root|basename}.
215 template=Changeset {node|short} in {root|basename}.
216 {hgweb}/{webroot}/rev/{node|short}\\n
216 {hgweb}/{webroot}/rev/{node|short}\\n
217 {desc}\\n
217 {desc}\\n
218 strip=5
218 strip=5
219
219
220 [web]
220 [web]
221 baseurl=http://my-project.org/hg
221 baseurl=http://my-project.org/hg
222
222
223 XMLRPC+email example configuration. This uses the Bugzilla at
223 XMLRPC+email example configuration. This uses the Bugzilla at
224 ``http://my-project.org/bugzilla``, logging in as user
224 ``http://my-project.org/bugzilla``, logging in as user
225 ``bugmail@my-project.org`` with password ``plugh``. It is used with a
225 ``bugmail@my-project.org`` with password ``plugh``. It is used with a
226 collection of Mercurial repositories in ``/var/local/hg/repos/``,
226 collection of Mercurial repositories in ``/var/local/hg/repos/``,
227 with a web interface at ``http://my-project.org/hg``. Bug comments
227 with a web interface at ``http://my-project.org/hg``. Bug comments
228 are sent to the Bugzilla email address
228 are sent to the Bugzilla email address
229 ``bugzilla@my-project.org``. ::
229 ``bugzilla@my-project.org``. ::
230
230
231 [bugzilla]
231 [bugzilla]
232 bzurl=http://my-project.org/bugzilla
232 bzurl=http://my-project.org/bugzilla
233 user=bugmail@my-project.org
233 user=bugmail@my-project.org
234 password=plugh
234 password=plugh
235 version=xmlrpc+email
235 version=xmlrpc+email
236 bzemail=bugzilla@my-project.org
236 bzemail=bugzilla@my-project.org
237 template=Changeset {node|short} in {root|basename}.
237 template=Changeset {node|short} in {root|basename}.
238 {hgweb}/{webroot}/rev/{node|short}\\n
238 {hgweb}/{webroot}/rev/{node|short}\\n
239 {desc}\\n
239 {desc}\\n
240 strip=5
240 strip=5
241
241
242 [web]
242 [web]
243 baseurl=http://my-project.org/hg
243 baseurl=http://my-project.org/hg
244
244
245 [usermap]
245 [usermap]
246 user@emaildomain.com=user.name@bugzilladomain.com
246 user@emaildomain.com=user.name@bugzilladomain.com
247
247
248 MySQL example configuration. This has a local Bugzilla 3.2 installation
248 MySQL example configuration. This has a local Bugzilla 3.2 installation
249 in ``/opt/bugzilla-3.2``. The MySQL database is on ``localhost``,
249 in ``/opt/bugzilla-3.2``. The MySQL database is on ``localhost``,
250 the Bugzilla database name is ``bugs`` and MySQL is
250 the Bugzilla database name is ``bugs`` and MySQL is
251 accessed with MySQL username ``bugs`` password ``XYZZY``. It is used
251 accessed with MySQL username ``bugs`` password ``XYZZY``. It is used
252 with a collection of Mercurial repositories in ``/var/local/hg/repos/``,
252 with a collection of Mercurial repositories in ``/var/local/hg/repos/``,
253 with a web interface at ``http://my-project.org/hg``. ::
253 with a web interface at ``http://my-project.org/hg``. ::
254
254
255 [bugzilla]
255 [bugzilla]
256 host=localhost
256 host=localhost
257 password=XYZZY
257 password=XYZZY
258 version=3.0
258 version=3.0
259 bzuser=unknown@domain.com
259 bzuser=unknown@domain.com
260 bzdir=/opt/bugzilla-3.2
260 bzdir=/opt/bugzilla-3.2
261 template=Changeset {node|short} in {root|basename}.
261 template=Changeset {node|short} in {root|basename}.
262 {hgweb}/{webroot}/rev/{node|short}\\n
262 {hgweb}/{webroot}/rev/{node|short}\\n
263 {desc}\\n
263 {desc}\\n
264 strip=5
264 strip=5
265
265
266 [web]
266 [web]
267 baseurl=http://my-project.org/hg
267 baseurl=http://my-project.org/hg
268
268
269 [usermap]
269 [usermap]
270 user@emaildomain.com=user.name@bugzilladomain.com
270 user@emaildomain.com=user.name@bugzilladomain.com
271
271
272 All the above add a comment to the Bugzilla bug record of the form::
272 All the above add a comment to the Bugzilla bug record of the form::
273
273
274 Changeset 3b16791d6642 in repository-name.
274 Changeset 3b16791d6642 in repository-name.
275 http://my-project.org/hg/repository-name/rev/3b16791d6642
275 http://my-project.org/hg/repository-name/rev/3b16791d6642
276
276
277 Changeset commit comment. Bug 1234.
277 Changeset commit comment. Bug 1234.
278 '''
278 '''
279
279
# This extension targets Python 2; absolute_import gives py2 the py3
# import semantics so the same source parses under both interpreters.
from __future__ import absolute_import

import re
import time
import xmlrpclib

from mercurial.i18n import _
from mercurial.node import short
from mercurial import (
    cmdutil,
    error,
    mail,
    util,
)

# py3 compatibility: mercurial's util module exposes the correct urlparse
# flavor for the running interpreter (urlparse on py2, urllib.parse on
# py3), so alias it here instead of importing urlparse directly.
urlparse = util.urlparse

# Note for extension authors: ONLY specify testedwith = 'internal' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'internal'
301
302
class bzaccess(object):
    '''Base class for access to Bugzilla.

    Concrete access modes (XMLRPC, XMLRPC+email, direct MySQL) subclass
    this and override the no-op hook methods below.
    '''

    def __init__(self, ui):
        self.ui = ui
        usermap = self.ui.config('bugzilla', 'usermap')
        if usermap:
            # Fold committer -> Bugzilla user mappings from the external
            # usermap file into the [usermap] config section.
            self.ui.readconfig(usermap, sections=['usermap'])

    def map_committer(self, user):
        '''map name of committer to Bugzilla user name.'''
        # Case-insensitive lookup in [usermap]; unmapped committers are
        # passed through unchanged.
        wanted = user.lower()
        for committer, bzuser in self.ui.configitems('usermap'):
            if committer.lower() == wanted:
                return bzuser
        return user

    # Methods to be implemented by access classes.
    #
    # 'bugs' is a dict keyed on bug id, where values are a dict holding
    # updates to bug state. Recognized dict keys are:
    #
    # 'hours': Value, float containing work hours to be updated.
    # 'fix': If key present, bug is to be marked fixed. Value ignored.

    def filter_real_bug_ids(self, bugs):
        '''remove bug IDs that do not exist in Bugzilla from bugs.'''
        pass

    def filter_cset_known_bug_ids(self, node, bugs):
        '''remove bug IDs where node occurs in comment text from bugs.'''
        pass

    def updatebug(self, bugid, newstate, text, committer):
        '''update the specified bug. Add comment text and set new states.

        If possible add the comment as being from the committer of
        the changeset. Otherwise use the default Bugzilla user.
        '''
        pass

    def notify(self, bugs, committer):
        '''Force sending of Bugzilla notification emails.

        Only required if the access method does not trigger notification
        emails automatically.
        '''
        pass
349
350
350 # Bugzilla via direct access to MySQL database.
351 # Bugzilla via direct access to MySQL database.
351 class bzmysql(bzaccess):
352 class bzmysql(bzaccess):
352 '''Support for direct MySQL access to Bugzilla.
353 '''Support for direct MySQL access to Bugzilla.
353
354
354 The earliest Bugzilla version this is tested with is version 2.16.
355 The earliest Bugzilla version this is tested with is version 2.16.
355
356
356 If your Bugzilla is version 3.4 or above, you are strongly
357 If your Bugzilla is version 3.4 or above, you are strongly
357 recommended to use the XMLRPC access method instead.
358 recommended to use the XMLRPC access method instead.
358 '''
359 '''
359
360
360 @staticmethod
361 @staticmethod
361 def sql_buglist(ids):
362 def sql_buglist(ids):
362 '''return SQL-friendly list of bug ids'''
363 '''return SQL-friendly list of bug ids'''
363 return '(' + ','.join(map(str, ids)) + ')'
364 return '(' + ','.join(map(str, ids)) + ')'
364
365
365 _MySQLdb = None
366 _MySQLdb = None
366
367
367 def __init__(self, ui):
368 def __init__(self, ui):
368 try:
369 try:
369 import MySQLdb as mysql
370 import MySQLdb as mysql
370 bzmysql._MySQLdb = mysql
371 bzmysql._MySQLdb = mysql
371 except ImportError as err:
372 except ImportError as err:
372 raise error.Abort(_('python mysql support not available: %s') % err)
373 raise error.Abort(_('python mysql support not available: %s') % err)
373
374
374 bzaccess.__init__(self, ui)
375 bzaccess.__init__(self, ui)
375
376
376 host = self.ui.config('bugzilla', 'host', 'localhost')
377 host = self.ui.config('bugzilla', 'host', 'localhost')
377 user = self.ui.config('bugzilla', 'user', 'bugs')
378 user = self.ui.config('bugzilla', 'user', 'bugs')
378 passwd = self.ui.config('bugzilla', 'password')
379 passwd = self.ui.config('bugzilla', 'password')
379 db = self.ui.config('bugzilla', 'db', 'bugs')
380 db = self.ui.config('bugzilla', 'db', 'bugs')
380 timeout = int(self.ui.config('bugzilla', 'timeout', 5))
381 timeout = int(self.ui.config('bugzilla', 'timeout', 5))
381 self.ui.note(_('connecting to %s:%s as %s, password %s\n') %
382 self.ui.note(_('connecting to %s:%s as %s, password %s\n') %
382 (host, db, user, '*' * len(passwd)))
383 (host, db, user, '*' * len(passwd)))
383 self.conn = bzmysql._MySQLdb.connect(host=host,
384 self.conn = bzmysql._MySQLdb.connect(host=host,
384 user=user, passwd=passwd,
385 user=user, passwd=passwd,
385 db=db,
386 db=db,
386 connect_timeout=timeout)
387 connect_timeout=timeout)
387 self.cursor = self.conn.cursor()
388 self.cursor = self.conn.cursor()
388 self.longdesc_id = self.get_longdesc_id()
389 self.longdesc_id = self.get_longdesc_id()
389 self.user_ids = {}
390 self.user_ids = {}
390 self.default_notify = "cd %(bzdir)s && ./processmail %(id)s %(user)s"
391 self.default_notify = "cd %(bzdir)s && ./processmail %(id)s %(user)s"
391
392
392 def run(self, *args, **kwargs):
393 def run(self, *args, **kwargs):
393 '''run a query.'''
394 '''run a query.'''
394 self.ui.note(_('query: %s %s\n') % (args, kwargs))
395 self.ui.note(_('query: %s %s\n') % (args, kwargs))
395 try:
396 try:
396 self.cursor.execute(*args, **kwargs)
397 self.cursor.execute(*args, **kwargs)
397 except bzmysql._MySQLdb.MySQLError:
398 except bzmysql._MySQLdb.MySQLError:
398 self.ui.note(_('failed query: %s %s\n') % (args, kwargs))
399 self.ui.note(_('failed query: %s %s\n') % (args, kwargs))
399 raise
400 raise
400
401
401 def get_longdesc_id(self):
402 def get_longdesc_id(self):
402 '''get identity of longdesc field'''
403 '''get identity of longdesc field'''
403 self.run('select fieldid from fielddefs where name = "longdesc"')
404 self.run('select fieldid from fielddefs where name = "longdesc"')
404 ids = self.cursor.fetchall()
405 ids = self.cursor.fetchall()
405 if len(ids) != 1:
406 if len(ids) != 1:
406 raise error.Abort(_('unknown database schema'))
407 raise error.Abort(_('unknown database schema'))
407 return ids[0][0]
408 return ids[0][0]
408
409
409 def filter_real_bug_ids(self, bugs):
410 def filter_real_bug_ids(self, bugs):
410 '''filter not-existing bugs from set.'''
411 '''filter not-existing bugs from set.'''
411 self.run('select bug_id from bugs where bug_id in %s' %
412 self.run('select bug_id from bugs where bug_id in %s' %
412 bzmysql.sql_buglist(bugs.keys()))
413 bzmysql.sql_buglist(bugs.keys()))
413 existing = [id for (id,) in self.cursor.fetchall()]
414 existing = [id for (id,) in self.cursor.fetchall()]
414 for id in bugs.keys():
415 for id in bugs.keys():
415 if id not in existing:
416 if id not in existing:
416 self.ui.status(_('bug %d does not exist\n') % id)
417 self.ui.status(_('bug %d does not exist\n') % id)
417 del bugs[id]
418 del bugs[id]
418
419
419 def filter_cset_known_bug_ids(self, node, bugs):
420 def filter_cset_known_bug_ids(self, node, bugs):
420 '''filter bug ids that already refer to this changeset from set.'''
421 '''filter bug ids that already refer to this changeset from set.'''
421 self.run('''select bug_id from longdescs where
422 self.run('''select bug_id from longdescs where
422 bug_id in %s and thetext like "%%%s%%"''' %
423 bug_id in %s and thetext like "%%%s%%"''' %
423 (bzmysql.sql_buglist(bugs.keys()), short(node)))
424 (bzmysql.sql_buglist(bugs.keys()), short(node)))
424 for (id,) in self.cursor.fetchall():
425 for (id,) in self.cursor.fetchall():
425 self.ui.status(_('bug %d already knows about changeset %s\n') %
426 self.ui.status(_('bug %d already knows about changeset %s\n') %
426 (id, short(node)))
427 (id, short(node)))
427 del bugs[id]
428 del bugs[id]
428
429
429 def notify(self, bugs, committer):
430 def notify(self, bugs, committer):
430 '''tell bugzilla to send mail.'''
431 '''tell bugzilla to send mail.'''
431 self.ui.status(_('telling bugzilla to send mail:\n'))
432 self.ui.status(_('telling bugzilla to send mail:\n'))
432 (user, userid) = self.get_bugzilla_user(committer)
433 (user, userid) = self.get_bugzilla_user(committer)
433 for id in bugs.keys():
434 for id in bugs.keys():
434 self.ui.status(_(' bug %s\n') % id)
435 self.ui.status(_(' bug %s\n') % id)
435 cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
436 cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
436 bzdir = self.ui.config('bugzilla', 'bzdir',
437 bzdir = self.ui.config('bugzilla', 'bzdir',
437 '/var/www/html/bugzilla')
438 '/var/www/html/bugzilla')
438 try:
439 try:
439 # Backwards-compatible with old notify string, which
440 # Backwards-compatible with old notify string, which
440 # took one string. This will throw with a new format
441 # took one string. This will throw with a new format
441 # string.
442 # string.
442 cmd = cmdfmt % id
443 cmd = cmdfmt % id
443 except TypeError:
444 except TypeError:
444 cmd = cmdfmt % {'bzdir': bzdir, 'id': id, 'user': user}
445 cmd = cmdfmt % {'bzdir': bzdir, 'id': id, 'user': user}
445 self.ui.note(_('running notify command %s\n') % cmd)
446 self.ui.note(_('running notify command %s\n') % cmd)
446 fp = util.popen('(%s) 2>&1' % cmd)
447 fp = util.popen('(%s) 2>&1' % cmd)
447 out = fp.read()
448 out = fp.read()
448 ret = fp.close()
449 ret = fp.close()
449 if ret:
450 if ret:
450 self.ui.warn(out)
451 self.ui.warn(out)
451 raise error.Abort(_('bugzilla notify command %s') %
452 raise error.Abort(_('bugzilla notify command %s') %
452 util.explainexit(ret)[0])
453 util.explainexit(ret)[0])
453 self.ui.status(_('done\n'))
454 self.ui.status(_('done\n'))
454
455
455 def get_user_id(self, user):
456 def get_user_id(self, user):
456 '''look up numeric bugzilla user id.'''
457 '''look up numeric bugzilla user id.'''
457 try:
458 try:
458 return self.user_ids[user]
459 return self.user_ids[user]
459 except KeyError:
460 except KeyError:
460 try:
461 try:
461 userid = int(user)
462 userid = int(user)
462 except ValueError:
463 except ValueError:
463 self.ui.note(_('looking up user %s\n') % user)
464 self.ui.note(_('looking up user %s\n') % user)
464 self.run('''select userid from profiles
465 self.run('''select userid from profiles
465 where login_name like %s''', user)
466 where login_name like %s''', user)
466 all = self.cursor.fetchall()
467 all = self.cursor.fetchall()
467 if len(all) != 1:
468 if len(all) != 1:
468 raise KeyError(user)
469 raise KeyError(user)
469 userid = int(all[0][0])
470 userid = int(all[0][0])
470 self.user_ids[user] = userid
471 self.user_ids[user] = userid
471 return userid
472 return userid
472
473
473 def get_bugzilla_user(self, committer):
474 def get_bugzilla_user(self, committer):
474 '''See if committer is a registered bugzilla user. Return
475 '''See if committer is a registered bugzilla user. Return
475 bugzilla username and userid if so. If not, return default
476 bugzilla username and userid if so. If not, return default
476 bugzilla username and userid.'''
477 bugzilla username and userid.'''
477 user = self.map_committer(committer)
478 user = self.map_committer(committer)
478 try:
479 try:
479 userid = self.get_user_id(user)
480 userid = self.get_user_id(user)
480 except KeyError:
481 except KeyError:
481 try:
482 try:
482 defaultuser = self.ui.config('bugzilla', 'bzuser')
483 defaultuser = self.ui.config('bugzilla', 'bzuser')
483 if not defaultuser:
484 if not defaultuser:
484 raise error.Abort(_('cannot find bugzilla user id for %s') %
485 raise error.Abort(_('cannot find bugzilla user id for %s') %
485 user)
486 user)
486 userid = self.get_user_id(defaultuser)
487 userid = self.get_user_id(defaultuser)
487 user = defaultuser
488 user = defaultuser
488 except KeyError:
489 except KeyError:
489 raise error.Abort(_('cannot find bugzilla user id for %s or %s')
490 raise error.Abort(_('cannot find bugzilla user id for %s or %s')
490 % (user, defaultuser))
491 % (user, defaultuser))
491 return (user, userid)
492 return (user, userid)
492
493
493 def updatebug(self, bugid, newstate, text, committer):
494 def updatebug(self, bugid, newstate, text, committer):
494 '''update bug state with comment text.
495 '''update bug state with comment text.
495
496
496 Try adding comment as committer of changeset, otherwise as
497 Try adding comment as committer of changeset, otherwise as
497 default bugzilla user.'''
498 default bugzilla user.'''
498 if len(newstate) > 0:
499 if len(newstate) > 0:
499 self.ui.warn(_("Bugzilla/MySQL cannot update bug state\n"))
500 self.ui.warn(_("Bugzilla/MySQL cannot update bug state\n"))
500
501
501 (user, userid) = self.get_bugzilla_user(committer)
502 (user, userid) = self.get_bugzilla_user(committer)
502 now = time.strftime('%Y-%m-%d %H:%M:%S')
503 now = time.strftime('%Y-%m-%d %H:%M:%S')
503 self.run('''insert into longdescs
504 self.run('''insert into longdescs
504 (bug_id, who, bug_when, thetext)
505 (bug_id, who, bug_when, thetext)
505 values (%s, %s, %s, %s)''',
506 values (%s, %s, %s, %s)''',
506 (bugid, userid, now, text))
507 (bugid, userid, now, text))
507 self.run('''insert into bugs_activity (bug_id, who, bug_when, fieldid)
508 self.run('''insert into bugs_activity (bug_id, who, bug_when, fieldid)
508 values (%s, %s, %s, %s)''',
509 values (%s, %s, %s, %s)''',
509 (bugid, userid, now, self.longdesc_id))
510 (bugid, userid, now, self.longdesc_id))
510 self.conn.commit()
511 self.conn.commit()
511
512
class bzmysql_2_18(bzmysql):
    '''support for bugzilla 2.18 series.'''

    def __init__(self, ui):
        bzmysql.__init__(self, ui)
        # 2.18 uses contrib/sendbugmail.pl as the notification helper.
        self.default_notify = (
            "cd %(bzdir)s && perl -T contrib/sendbugmail.pl %(id)s %(user)s")
519
520
class bzmysql_3_0(bzmysql_2_18):
    '''support for bugzilla 3.0 series.'''

    def __init__(self, ui):
        bzmysql_2_18.__init__(self, ui)

    def get_longdesc_id(self):
        '''get identity of longdesc field'''
        # 3.0 keys fielddefs on the 'id' column.
        self.run('select id from fielddefs where name = "longdesc"')
        rows = self.cursor.fetchall()
        if len(rows) != 1:
            raise error.Abort(_('unknown database schema'))
        return rows[0][0]
533
534
534 # Bugzilla via XMLRPC interface.
535 # Bugzilla via XMLRPC interface.
535
536
class cookietransportrequest(object):
    """A Transport request method that retains cookies over its lifetime.

    The regular xmlrpclib transports ignore cookies. Which causes
    a bit of a problem when you need a cookie-based login, as with
    the Bugzilla XMLRPC interface prior to 4.4.3.

    So this is a helper for defining a Transport which looks for
    cookies being set in responses and saves them to add to all future
    requests.
    """

    # Inspiration drawn from
    # http://blog.godson.in/2010/09/how-to-make-python-xmlrpclib-client.html
    # http://www.itkovian.net/base/transport-class-for-pythons-xml-rpc-lib/

    # NOTE: class-level attribute, so cookies are shared by every
    # instance of this transport for the life of the process.
    cookies = []
    def send_cookies(self, connection):
        # Replay every cookie captured from earlier responses.
        if self.cookies:
            for cookie in self.cookies:
                connection.putheader("Cookie", cookie)

    def request(self, host, handler, request_body, verbose=0):
        """Issue one XML-RPC request, capturing any Set-Cookie headers.

        Mirrors xmlrpclib.Transport.request but inserts send_cookies()
        into the header sequence and records cookies from the response
        before unmarshalling the payload.
        """
        self.verbose = verbose
        self.accept_gzip_encoding = False

        # issue XML-RPC request
        h = self.make_connection(host)
        if verbose:
            h.set_debuglevel(1)

        self.send_request(h, handler, request_body)
        self.send_host(h, host)
        self.send_cookies(h)
        self.send_user_agent(h)
        self.send_content(h, request_body)

        # Deal with differences between Python 2.4-2.6 and 2.7.
        # In the former h is a HTTP(S). In the latter it's a
        # HTTP(S)Connection. Luckily, the 2.4-2.6 implementation of
        # HTTP(S) has an underlying HTTP(S)Connection, so extract
        # that and use it.
        try:
            response = h.getresponse()
        except AttributeError:
            response = h._conn.getresponse()

        # Add any cookie definitions to our list.
        for header in response.msg.getallmatchingheaders("Set-Cookie"):
            # Keep only the name=value part, dropping cookie attributes
            # such as path or expiry.
            val = header.split(": ", 1)[1]
            cookie = val.split(";", 1)[0]
            self.cookies.append(cookie)

        if response.status != 200:
            raise xmlrpclib.ProtocolError(host + handler, response.status,
                                          response.reason, response.msg.headers)

        payload = response.read()
        parser, unmarshaller = self.getparser()
        parser.feed(payload)
        parser.close()

        return unmarshaller.close()
599
600
600 # The explicit calls to the underlying xmlrpclib __init__() methods are
601 # The explicit calls to the underlying xmlrpclib __init__() methods are
601 # necessary. The xmlrpclib.Transport classes are old-style classes, and
602 # necessary. The xmlrpclib.Transport classes are old-style classes, and
602 # it turns out their __init__() doesn't get called when doing multiple
603 # it turns out their __init__() doesn't get called when doing multiple
603 # inheritance with a new-style class.
604 # inheritance with a new-style class.
class cookietransport(cookietransportrequest, xmlrpclib.Transport):
    """Cookie-retaining transport for plain-HTTP XML-RPC."""
    def __init__(self, use_datetime=0):
        # The old-style xmlrpclib base may lack __init__ entirely;
        # invoke it only when it exists.
        if not util.safehasattr(xmlrpclib.Transport, "__init__"):
            return
        xmlrpclib.Transport.__init__(self, use_datetime)
608
609
class cookiesafetransport(cookietransportrequest, xmlrpclib.SafeTransport):
    """Cookie-retaining transport for HTTPS XML-RPC."""
    def __init__(self, use_datetime=0):
        # The old-style xmlrpclib base may lack __init__ entirely;
        # invoke it only when it exists.
        if not util.safehasattr(xmlrpclib.Transport, "__init__"):
            return
        xmlrpclib.SafeTransport.__init__(self, use_datetime)
613
614
class bzxmlrpc(bzaccess):
    """Support for access to Bugzilla via the Bugzilla XMLRPC API.

    Requires a minimum Bugzilla version 3.4.
    """

    def __init__(self, ui):
        bzaccess.__init__(self, ui)

        # Derive the xmlrpc.cgi endpoint from the configured web URL.
        bzweb = self.ui.config('bugzilla', 'bzurl',
                               'http://localhost/bugzilla/')
        bzweb = bzweb.rstrip("/") + "/xmlrpc.cgi"

        user = self.ui.config('bugzilla', 'user', 'bugs')
        passwd = self.ui.config('bugzilla', 'password')

        self.fixstatus = self.ui.config('bugzilla', 'fixstatus', 'RESOLVED')
        self.fixresolution = self.ui.config('bugzilla', 'fixresolution',
                                            'FIXED')

        self.bzproxy = xmlrpclib.ServerProxy(bzweb, self.transport(bzweb))
        # Server version decides which update API is available later on.
        ver = self.bzproxy.Bugzilla.version()['version'].split('.')
        self.bzvermajor = int(ver[0])
        self.bzverminor = int(ver[1])
        login = self.bzproxy.User.login({'login': user, 'password': passwd,
                                         'restrict_login': True})
        # Older servers return no token; fall back to the empty string.
        self.bztoken = login.get('token', '')

    def transport(self, uri):
        # Pick the transport matching the URL scheme; both retain
        # cookies for pre-4.4.3 cookie-based logins.
        if urlparse.urlparse(uri, "http")[0] == "https":
            return cookiesafetransport()
        else:
            return cookietransport()

    def get_bug_comments(self, id):
        """Return a string with all comment text for a bug."""
        c = self.bzproxy.Bug.comments({'ids': [id],
                                       'include_fields': ['text'],
                                       'token': self.bztoken})
        return ''.join([t['text'] for t in c['bugs'][str(id)]['comments']])

    def filter_real_bug_ids(self, bugs):
        # Probe all ids at once; 'permissive' makes unknown ids come
        # back in 'faults' instead of raising.
        probe = self.bzproxy.Bug.get({'ids': sorted(bugs.keys()),
                                      'include_fields': [],
                                      'permissive': True,
                                      'token': self.bztoken,
                                      })
        for badbug in probe['faults']:
            id = badbug['id']
            self.ui.status(_('bug %d does not exist\n') % id)
            del bugs[id]

    def filter_cset_known_bug_ids(self, node, bugs):
        # Drop bugs whose existing comments already mention this
        # changeset's short hash.
        for id in sorted(bugs.keys()):
            if self.get_bug_comments(id).find(short(node)) != -1:
                self.ui.status(_('bug %d already knows about changeset %s\n') %
                               (id, short(node)))
                del bugs[id]

    def updatebug(self, bugid, newstate, text, committer):
        # Add the comment (and optional work time / fixed state) via
        # whichever API this server version supports.
        args = {}
        if 'hours' in newstate:
            args['work_time'] = newstate['hours']

        if self.bzvermajor >= 4:
            # Bug.update can change status as well as add a comment.
            args['ids'] = [bugid]
            args['comment'] = {'body' : text}
            if 'fix' in newstate:
                args['status'] = self.fixstatus
                args['resolution'] = self.fixresolution
            args['token'] = self.bztoken
            self.bzproxy.Bug.update(args)
        else:
            # Pre-4.0 only exposes Bug.add_comment; no state changes.
            if 'fix' in newstate:
                self.ui.warn(_("Bugzilla/XMLRPC needs Bugzilla 4.0 or later "
                               "to mark bugs fixed\n"))
            args['id'] = bugid
            args['comment'] = text
            self.bzproxy.Bug.add_comment(args)
693
694
class bzxmlrpcemail(bzxmlrpc):
    """Read data from Bugzilla via XMLRPC, send updates via email.

    Advantages of sending updates via email:
    1. Comments can be added as any user, not just logged in user.
    2. Bug statuses or other fields not accessible via XMLRPC can
       potentially be updated.

    There is no XMLRPC function to change bug status before Bugzilla
    4.0, so bugs cannot be marked fixed via XMLRPC before Bugzilla 4.0.
    But bugs can be marked fixed via email from 3.4 onwards.
    """

    # The email interface changes subtly between 3.4 and 3.6. In 3.4,
    # in-email fields are specified as '@<fieldname> = <value>'. In
    # 3.6 this becomes '@<fieldname> <value>'. And fieldname @bug_id
    # in 3.4 becomes @id in 3.6. 3.6 and 4.0 both maintain backwards
    # compatibility, but rather than rely on this use the new format for
    # 4.0 onwards.

    def __init__(self, ui):
        bzxmlrpc.__init__(self, ui)

        # Destination address of the Bugzilla email_in.pl gateway.
        self.bzemail = self.ui.config('bugzilla', 'bzemail')
        if not self.bzemail:
            raise error.Abort(_("configuration 'bzemail' missing"))
        mail.validateconfig(self.ui)

    def makecommandline(self, fieldname, value):
        """Format one in-email field command for this server version."""
        if self.bzvermajor >= 4:
            return "@%s %s" % (fieldname, str(value))
        else:
            # Pre-4.0 servers use the '=' form and 'bug_id' for 'id'.
            if fieldname == "id":
                fieldname = "bug_id"
            return "@%s = %s" % (fieldname, str(value))

    def send_bug_modify_email(self, bugid, commands, comment, committer):
        '''send modification message to Bugzilla bug via email.

        The message format is documented in the Bugzilla email_in.pl
        specification. commands is a list of command lines, comment is the
        comment text.

        To stop users from crafting commit comments with
        Bugzilla commands, specify the bug ID via the message body, rather
        than the subject line, and leave a blank line after it.
        '''
        user = self.map_committer(committer)
        matches = self.bzproxy.User.get({'match': [user],
                                         'token': self.bztoken})
        if not matches['users']:
            # Committer unknown to Bugzilla: fall back to the
            # configured default user.
            user = self.ui.config('bugzilla', 'user', 'bugs')
            matches = self.bzproxy.User.get({'match': [user],
                                             'token': self.bztoken})
            if not matches['users']:
                raise error.Abort(_("default bugzilla user %s email not found")
                                  % user)
        user = matches['users'][0]['email']
        commands.append(self.makecommandline("id", bugid))

        # Command lines first, then a blank line, then the comment —
        # the layout email_in.pl expects.
        text = "\n".join(commands) + "\n\n" + comment

        _charsets = mail._charsets(self.ui)
        user = mail.addressencode(self.ui, user, _charsets)
        bzemail = mail.addressencode(self.ui, self.bzemail, _charsets)
        msg = mail.mimeencode(self.ui, text, _charsets)
        msg['From'] = user
        msg['To'] = bzemail
        msg['Subject'] = mail.headencode(self.ui, "Bug modification", _charsets)
        sendmail = mail.connect(self.ui)
        sendmail(user, bzemail, msg.as_string())

    def updatebug(self, bugid, newstate, text, committer):
        # Translate the requested state changes into in-email commands
        # and send them along with the comment.
        cmds = []
        if 'hours' in newstate:
            cmds.append(self.makecommandline("work_time", newstate['hours']))
        if 'fix' in newstate:
            cmds.append(self.makecommandline("bug_status", self.fixstatus))
            cmds.append(self.makecommandline("resolution", self.fixresolution))
        self.send_bug_modify_email(bugid, cmds, text, committer)
774
775
class bugzilla(object):
    # supported versions of bugzilla. different versions have
    # different schemas.
    _versions = {
        '2.16': bzmysql,
        '2.18': bzmysql_2_18,
        '3.0': bzmysql_3_0,
        'xmlrpc': bzxmlrpc,
        'xmlrpc+email': bzxmlrpcemail
        }

    # Default regexp matching 'bug 123', 'bugs 1, 2 and 3', optionally
    # followed by an hours figure ('h 1.5').
    _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
                       r'(?P<ids>(?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
                       r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')

    # Same shape, but for 'fixes bug N' phrases that should also mark
    # the bug fixed.
    _default_fix_re = (r'fix(?:es)?\s*(?:bugs?\s*)?,?\s*'
                       r'(?:nos?\.?|num(?:ber)?s?)?\s*'
                       r'(?P<ids>(?:#?\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
                       r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')

    def __init__(self, ui, repo):
        """Select and instantiate the access driver for the configured
        Bugzilla version, and compile the bug-reference regexps."""
        self.ui = ui
        self.repo = repo

        bzversion = self.ui.config('bugzilla', 'version')
        try:
            bzclass = bugzilla._versions[bzversion]
        except KeyError:
            raise error.Abort(_('bugzilla version %s not supported') %
                              bzversion)
        self.bzdriver = bzclass(self.ui)

        self.bug_re = re.compile(
            self.ui.config('bugzilla', 'regexp',
                           bugzilla._default_bug_re), re.IGNORECASE)
        self.fix_re = re.compile(
            self.ui.config('bugzilla', 'fixregexp',
                           bugzilla._default_fix_re), re.IGNORECASE)
        # Splitter for pulling individual numeric ids out of an 'ids'
        # match group.
        self.split_re = re.compile(r'\D+')

    def find_bugs(self, ctx):
        '''return bugs dictionary created from commit comment.

        Extract bug info from changeset comments. Filter out any that are
        not known to Bugzilla, and any that already have a reference to
        the given changeset in their comments.
        '''
        start = 0
        hours = 0.0
        bugs = {}
        bugmatch = self.bug_re.search(ctx.description(), start)
        fixmatch = self.fix_re.search(ctx.description(), start)
        while True:
            bugattribs = {}
            if not bugmatch and not fixmatch:
                break
            # Process whichever match occurs earliest in the text.
            if not bugmatch:
                m = fixmatch
            elif not fixmatch:
                m = bugmatch
            else:
                if bugmatch.start() < fixmatch.start():
                    m = bugmatch
                else:
                    m = fixmatch
            start = m.end()
            if m is bugmatch:
                bugmatch = self.bug_re.search(ctx.description(), start)
                if 'fix' in bugattribs:
                    del bugattribs['fix']
            else:
                fixmatch = self.fix_re.search(ctx.description(), start)
                # Mark these bugs to be resolved as fixed.
                bugattribs['fix'] = None

            try:
                ids = m.group('ids')
            except IndexError:
                # Custom regexp without a named group: use group 1.
                ids = m.group(1)
            try:
                hours = float(m.group('hours'))
                bugattribs['hours'] = hours
            except IndexError:
                # Custom regexp has no 'hours' group.
                pass
            except TypeError:
                # Hours group present but did not match (None).
                pass
            except ValueError:
                self.ui.status(_("%s: invalid hours\n") % m.group('hours'))

            for id in self.split_re.split(ids):
                if not id:
                    continue
                bugs[int(id)] = bugattribs
        if bugs:
            self.bzdriver.filter_real_bug_ids(bugs)
        if bugs:
            self.bzdriver.filter_cset_known_bug_ids(ctx.node(), bugs)
        return bugs

    def update(self, bugid, newstate, ctx):
        '''update bugzilla bug with reference to changeset.'''

        def webroot(root):
            '''strip leading prefix of repo root and turn into
            url-safe path.'''
            # 'strip' counts how many leading path components to drop.
            count = int(self.ui.config('bugzilla', 'strip', 0))
            root = util.pconvert(root)
            while count > 0:
                c = root.find('/')
                if c == -1:
                    break
                root = root[c + 1:]
                count -= 1
            return root

        mapfile = None
        tmpl = self.ui.config('bugzilla', 'template')
        if not tmpl:
            mapfile = self.ui.config('bugzilla', 'style')
        if not mapfile and not tmpl:
            tmpl = _('changeset {node|short} in repo {root} refers '
                     'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
        t = cmdutil.changeset_templater(self.ui, self.repo,
                                        False, None, tmpl, mapfile, False)
        # Render the comment into a buffer instead of the terminal.
        self.ui.pushbuffer()
        t.show(ctx, changes=ctx.changeset(),
               bug=str(bugid),
               hgweb=self.ui.config('web', 'baseurl'),
               root=self.repo.root,
               webroot=webroot(self.repo.root))
        data = self.ui.popbuffer()
        self.bzdriver.updatebug(bugid, newstate, data, util.email(ctx.user()))

    def notify(self, bugs, committer):
        '''ensure Bugzilla users are notified of bug change.'''
        self.bzdriver.notify(bugs, committer)
910
911
def hook(ui, repo, hooktype, node=None, **kwargs):
    '''Comment on each Bugzilla bug referenced by a changeset.

    Only one comment is added per bug, so the same change seen multiple
    times does not fill the bug with duplicate data.
    '''
    if node is None:
        raise error.Abort(_('hook type %s does not pass a changeset id') %
                          hooktype)
    try:
        integration = bugzilla(ui, repo)
        ctx = repo[node]
        found = integration.find_bugs(ctx)
        if found:
            for bugid in found:
                integration.update(bugid, found[bugid], ctx)
            integration.notify(found, util.email(ctx.user()))
    except Exception as e:
        # Surface any driver/template failure as a clean abort.
        raise error.Abort(_('Bugzilla error: %s') % e)
@@ -1,131 +1,138 b''
1 # pycompat.py - portability shim for python 3
1 # pycompat.py - portability shim for python 3
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 """Mercurial portability shim for python 3.
6 """Mercurial portability shim for python 3.
7
7
8 This contains aliases to hide python version-specific details from the core.
8 This contains aliases to hide python version-specific details from the core.
9 """
9 """
10
10
11 from __future__ import absolute_import
11 from __future__ import absolute_import
12
12
13 try:
13 try:
14 import cPickle as pickle
14 import cPickle as pickle
15 pickle.dumps
15 pickle.dumps
16 except ImportError:
16 except ImportError:
17 import pickle
17 import pickle
18 pickle.dumps # silence pyflakes
18 pickle.dumps # silence pyflakes
19
19
20 try:
20 try:
21 import urlparse
22 urlparse.urlparse
23 except ImportError:
24 import urllib.parse as urlparse
25 urlparse.urlparse
26
27 try:
21 import cStringIO as io
28 import cStringIO as io
22 stringio = io.StringIO
29 stringio = io.StringIO
23 except ImportError:
30 except ImportError:
24 import io
31 import io
25 stringio = io.StringIO
32 stringio = io.StringIO
26
33
27 try:
34 try:
28 import Queue as _queue
35 import Queue as _queue
29 _queue.Queue
36 _queue.Queue
30 except ImportError:
37 except ImportError:
31 import queue as _queue
38 import queue as _queue
32 empty = _queue.Empty
39 empty = _queue.Empty
33 queue = _queue.Queue
40 queue = _queue.Queue
34
41
35 class _pycompatstub(object):
42 class _pycompatstub(object):
36 pass
43 pass
37
44
38 def _alias(alias, origin, items):
45 def _alias(alias, origin, items):
39 """ populate a _pycompatstub
46 """ populate a _pycompatstub
40
47
41 copies items from origin to alias
48 copies items from origin to alias
42 """
49 """
43 def hgcase(item):
50 def hgcase(item):
44 return item.replace('_', '').lower()
51 return item.replace('_', '').lower()
45 for item in items:
52 for item in items:
46 try:
53 try:
47 setattr(alias, hgcase(item), getattr(origin, item))
54 setattr(alias, hgcase(item), getattr(origin, item))
48 except AttributeError:
55 except AttributeError:
49 pass
56 pass
50
57
51 urlreq = _pycompatstub()
58 urlreq = _pycompatstub()
52 urlerr = _pycompatstub()
59 urlerr = _pycompatstub()
53 try:
60 try:
54 import urllib2
61 import urllib2
55 import urllib
62 import urllib
56 _alias(urlreq, urllib, (
63 _alias(urlreq, urllib, (
57 "addclosehook",
64 "addclosehook",
58 "addinfourl",
65 "addinfourl",
59 "ftpwrapper",
66 "ftpwrapper",
60 "pathname2url",
67 "pathname2url",
61 "quote",
68 "quote",
62 "splitattr",
69 "splitattr",
63 "splitpasswd",
70 "splitpasswd",
64 "splitport",
71 "splitport",
65 "splituser",
72 "splituser",
66 "unquote",
73 "unquote",
67 "url2pathname",
74 "url2pathname",
68 "urlencode",
75 "urlencode",
69 "urlencode",
76 "urlencode",
70 ))
77 ))
71 _alias(urlreq, urllib2, (
78 _alias(urlreq, urllib2, (
72 "AbstractHTTPHandler",
79 "AbstractHTTPHandler",
73 "BaseHandler",
80 "BaseHandler",
74 "build_opener",
81 "build_opener",
75 "FileHandler",
82 "FileHandler",
76 "FTPHandler",
83 "FTPHandler",
77 "HTTPBasicAuthHandler",
84 "HTTPBasicAuthHandler",
78 "HTTPDigestAuthHandler",
85 "HTTPDigestAuthHandler",
79 "HTTPHandler",
86 "HTTPHandler",
80 "HTTPPasswordMgrWithDefaultRealm",
87 "HTTPPasswordMgrWithDefaultRealm",
81 "HTTPSHandler",
88 "HTTPSHandler",
82 "install_opener",
89 "install_opener",
83 "ProxyHandler",
90 "ProxyHandler",
84 "Request",
91 "Request",
85 "urlopen",
92 "urlopen",
86 ))
93 ))
87 _alias(urlerr, urllib2, (
94 _alias(urlerr, urllib2, (
88 "HTTPError",
95 "HTTPError",
89 "URLError",
96 "URLError",
90 ))
97 ))
91
98
92 except ImportError:
99 except ImportError:
93 import urllib.request
100 import urllib.request
94 _alias(urlreq, urllib.request, (
101 _alias(urlreq, urllib.request, (
95 "AbstractHTTPHandler",
102 "AbstractHTTPHandler",
96 "addclosehook",
103 "addclosehook",
97 "addinfourl",
104 "addinfourl",
98 "BaseHandler",
105 "BaseHandler",
99 "build_opener",
106 "build_opener",
100 "FileHandler",
107 "FileHandler",
101 "FTPHandler",
108 "FTPHandler",
102 "ftpwrapper",
109 "ftpwrapper",
103 "HTTPHandler",
110 "HTTPHandler",
104 "HTTPSHandler",
111 "HTTPSHandler",
105 "install_opener",
112 "install_opener",
106 "pathname2url",
113 "pathname2url",
107 "HTTPBasicAuthHandler",
114 "HTTPBasicAuthHandler",
108 "HTTPDigestAuthHandler",
115 "HTTPDigestAuthHandler",
109 "HTTPPasswordMgrWithDefaultRealm",
116 "HTTPPasswordMgrWithDefaultRealm",
110 "ProxyHandler",
117 "ProxyHandler",
111 "quote",
118 "quote",
112 "Request",
119 "Request",
113 "splitattr",
120 "splitattr",
114 "splitpasswd",
121 "splitpasswd",
115 "splitport",
122 "splitport",
116 "splituser",
123 "splituser",
117 "unquote",
124 "unquote",
118 "url2pathname",
125 "url2pathname",
119 "urlopen",
126 "urlopen",
120 ))
127 ))
121 import urllib.error
128 import urllib.error
122 _alias(urlerr, urllib.error, (
129 _alias(urlerr, urllib.error, (
123 "HTTPError",
130 "HTTPError",
124 "URLError",
131 "URLError",
125 ))
132 ))
126
133
127 try:
134 try:
128 xrange
135 xrange
129 except NameError:
136 except NameError:
130 import builtins
137 import builtins
131 builtins.xrange = range
138 builtins.xrange = range
@@ -1,2853 +1,2854 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import zlib
37 import zlib
38
38
39 from . import (
39 from . import (
40 encoding,
40 encoding,
41 error,
41 error,
42 i18n,
42 i18n,
43 osutil,
43 osutil,
44 parsers,
44 parsers,
45 pycompat,
45 pycompat,
46 )
46 )
47
47
48 for attr in (
48 for attr in (
49 'empty',
49 'empty',
50 'pickle',
50 'pickle',
51 'queue',
51 'queue',
52 'urlerr',
52 'urlerr',
53 'urlparse',
53 # we do import urlreq, but we do it outside the loop
54 # we do import urlreq, but we do it outside the loop
54 #'urlreq',
55 #'urlreq',
55 'stringio',
56 'stringio',
56 ):
57 ):
57 globals()[attr] = getattr(pycompat, attr)
58 globals()[attr] = getattr(pycompat, attr)
58
59
59 # This line is to make pyflakes happy:
60 # This line is to make pyflakes happy:
60 urlreq = pycompat.urlreq
61 urlreq = pycompat.urlreq
61
62
62 if os.name == 'nt':
63 if os.name == 'nt':
63 from . import windows as platform
64 from . import windows as platform
64 else:
65 else:
65 from . import posix as platform
66 from . import posix as platform
66
67
67 _ = i18n._
68 _ = i18n._
68
69
69 cachestat = platform.cachestat
70 cachestat = platform.cachestat
70 checkexec = platform.checkexec
71 checkexec = platform.checkexec
71 checklink = platform.checklink
72 checklink = platform.checklink
72 copymode = platform.copymode
73 copymode = platform.copymode
73 executablepath = platform.executablepath
74 executablepath = platform.executablepath
74 expandglobs = platform.expandglobs
75 expandglobs = platform.expandglobs
75 explainexit = platform.explainexit
76 explainexit = platform.explainexit
76 findexe = platform.findexe
77 findexe = platform.findexe
77 gethgcmd = platform.gethgcmd
78 gethgcmd = platform.gethgcmd
78 getuser = platform.getuser
79 getuser = platform.getuser
79 getpid = os.getpid
80 getpid = os.getpid
80 groupmembers = platform.groupmembers
81 groupmembers = platform.groupmembers
81 groupname = platform.groupname
82 groupname = platform.groupname
82 hidewindow = platform.hidewindow
83 hidewindow = platform.hidewindow
83 isexec = platform.isexec
84 isexec = platform.isexec
84 isowner = platform.isowner
85 isowner = platform.isowner
85 localpath = platform.localpath
86 localpath = platform.localpath
86 lookupreg = platform.lookupreg
87 lookupreg = platform.lookupreg
87 makedir = platform.makedir
88 makedir = platform.makedir
88 nlinks = platform.nlinks
89 nlinks = platform.nlinks
89 normpath = platform.normpath
90 normpath = platform.normpath
90 normcase = platform.normcase
91 normcase = platform.normcase
91 normcasespec = platform.normcasespec
92 normcasespec = platform.normcasespec
92 normcasefallback = platform.normcasefallback
93 normcasefallback = platform.normcasefallback
93 openhardlinks = platform.openhardlinks
94 openhardlinks = platform.openhardlinks
94 oslink = platform.oslink
95 oslink = platform.oslink
95 parsepatchoutput = platform.parsepatchoutput
96 parsepatchoutput = platform.parsepatchoutput
96 pconvert = platform.pconvert
97 pconvert = platform.pconvert
97 poll = platform.poll
98 poll = platform.poll
98 popen = platform.popen
99 popen = platform.popen
99 posixfile = platform.posixfile
100 posixfile = platform.posixfile
100 quotecommand = platform.quotecommand
101 quotecommand = platform.quotecommand
101 readpipe = platform.readpipe
102 readpipe = platform.readpipe
102 rename = platform.rename
103 rename = platform.rename
103 removedirs = platform.removedirs
104 removedirs = platform.removedirs
104 samedevice = platform.samedevice
105 samedevice = platform.samedevice
105 samefile = platform.samefile
106 samefile = platform.samefile
106 samestat = platform.samestat
107 samestat = platform.samestat
107 setbinary = platform.setbinary
108 setbinary = platform.setbinary
108 setflags = platform.setflags
109 setflags = platform.setflags
109 setsignalhandler = platform.setsignalhandler
110 setsignalhandler = platform.setsignalhandler
110 shellquote = platform.shellquote
111 shellquote = platform.shellquote
111 spawndetached = platform.spawndetached
112 spawndetached = platform.spawndetached
112 split = platform.split
113 split = platform.split
113 sshargs = platform.sshargs
114 sshargs = platform.sshargs
114 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
115 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
115 statisexec = platform.statisexec
116 statisexec = platform.statisexec
116 statislink = platform.statislink
117 statislink = platform.statislink
117 termwidth = platform.termwidth
118 termwidth = platform.termwidth
118 testpid = platform.testpid
119 testpid = platform.testpid
119 umask = platform.umask
120 umask = platform.umask
120 unlink = platform.unlink
121 unlink = platform.unlink
121 unlinkpath = platform.unlinkpath
122 unlinkpath = platform.unlinkpath
122 username = platform.username
123 username = platform.username
123
124
124 # Python compatibility
125 # Python compatibility
125
126
126 _notset = object()
127 _notset = object()
127
128
128 # disable Python's problematic floating point timestamps (issue4836)
129 # disable Python's problematic floating point timestamps (issue4836)
129 # (Python hypocritically says you shouldn't change this behavior in
130 # (Python hypocritically says you shouldn't change this behavior in
130 # libraries, and sure enough Mercurial is not a library.)
131 # libraries, and sure enough Mercurial is not a library.)
131 os.stat_float_times(False)
132 os.stat_float_times(False)
132
133
133 def safehasattr(thing, attr):
134 def safehasattr(thing, attr):
134 return getattr(thing, attr, _notset) is not _notset
135 return getattr(thing, attr, _notset) is not _notset
135
136
136 DIGESTS = {
137 DIGESTS = {
137 'md5': hashlib.md5,
138 'md5': hashlib.md5,
138 'sha1': hashlib.sha1,
139 'sha1': hashlib.sha1,
139 'sha512': hashlib.sha512,
140 'sha512': hashlib.sha512,
140 }
141 }
141 # List of digest types from strongest to weakest
142 # List of digest types from strongest to weakest
142 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
143 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
143
144
144 for k in DIGESTS_BY_STRENGTH:
145 for k in DIGESTS_BY_STRENGTH:
145 assert k in DIGESTS
146 assert k in DIGESTS
146
147
147 class digester(object):
148 class digester(object):
148 """helper to compute digests.
149 """helper to compute digests.
149
150
150 This helper can be used to compute one or more digests given their name.
151 This helper can be used to compute one or more digests given their name.
151
152
152 >>> d = digester(['md5', 'sha1'])
153 >>> d = digester(['md5', 'sha1'])
153 >>> d.update('foo')
154 >>> d.update('foo')
154 >>> [k for k in sorted(d)]
155 >>> [k for k in sorted(d)]
155 ['md5', 'sha1']
156 ['md5', 'sha1']
156 >>> d['md5']
157 >>> d['md5']
157 'acbd18db4cc2f85cedef654fccc4a4d8'
158 'acbd18db4cc2f85cedef654fccc4a4d8'
158 >>> d['sha1']
159 >>> d['sha1']
159 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
160 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
160 >>> digester.preferred(['md5', 'sha1'])
161 >>> digester.preferred(['md5', 'sha1'])
161 'sha1'
162 'sha1'
162 """
163 """
163
164
164 def __init__(self, digests, s=''):
165 def __init__(self, digests, s=''):
165 self._hashes = {}
166 self._hashes = {}
166 for k in digests:
167 for k in digests:
167 if k not in DIGESTS:
168 if k not in DIGESTS:
168 raise Abort(_('unknown digest type: %s') % k)
169 raise Abort(_('unknown digest type: %s') % k)
169 self._hashes[k] = DIGESTS[k]()
170 self._hashes[k] = DIGESTS[k]()
170 if s:
171 if s:
171 self.update(s)
172 self.update(s)
172
173
173 def update(self, data):
174 def update(self, data):
174 for h in self._hashes.values():
175 for h in self._hashes.values():
175 h.update(data)
176 h.update(data)
176
177
177 def __getitem__(self, key):
178 def __getitem__(self, key):
178 if key not in DIGESTS:
179 if key not in DIGESTS:
179 raise Abort(_('unknown digest type: %s') % k)
180 raise Abort(_('unknown digest type: %s') % k)
180 return self._hashes[key].hexdigest()
181 return self._hashes[key].hexdigest()
181
182
182 def __iter__(self):
183 def __iter__(self):
183 return iter(self._hashes)
184 return iter(self._hashes)
184
185
185 @staticmethod
186 @staticmethod
186 def preferred(supported):
187 def preferred(supported):
187 """returns the strongest digest type in both supported and DIGESTS."""
188 """returns the strongest digest type in both supported and DIGESTS."""
188
189
189 for k in DIGESTS_BY_STRENGTH:
190 for k in DIGESTS_BY_STRENGTH:
190 if k in supported:
191 if k in supported:
191 return k
192 return k
192 return None
193 return None
193
194
194 class digestchecker(object):
195 class digestchecker(object):
195 """file handle wrapper that additionally checks content against a given
196 """file handle wrapper that additionally checks content against a given
196 size and digests.
197 size and digests.
197
198
198 d = digestchecker(fh, size, {'md5': '...'})
199 d = digestchecker(fh, size, {'md5': '...'})
199
200
200 When multiple digests are given, all of them are validated.
201 When multiple digests are given, all of them are validated.
201 """
202 """
202
203
203 def __init__(self, fh, size, digests):
204 def __init__(self, fh, size, digests):
204 self._fh = fh
205 self._fh = fh
205 self._size = size
206 self._size = size
206 self._got = 0
207 self._got = 0
207 self._digests = dict(digests)
208 self._digests = dict(digests)
208 self._digester = digester(self._digests.keys())
209 self._digester = digester(self._digests.keys())
209
210
210 def read(self, length=-1):
211 def read(self, length=-1):
211 content = self._fh.read(length)
212 content = self._fh.read(length)
212 self._digester.update(content)
213 self._digester.update(content)
213 self._got += len(content)
214 self._got += len(content)
214 return content
215 return content
215
216
216 def validate(self):
217 def validate(self):
217 if self._size != self._got:
218 if self._size != self._got:
218 raise Abort(_('size mismatch: expected %d, got %d') %
219 raise Abort(_('size mismatch: expected %d, got %d') %
219 (self._size, self._got))
220 (self._size, self._got))
220 for k, v in self._digests.items():
221 for k, v in self._digests.items():
221 if v != self._digester[k]:
222 if v != self._digester[k]:
222 # i18n: first parameter is a digest name
223 # i18n: first parameter is a digest name
223 raise Abort(_('%s mismatch: expected %s, got %s') %
224 raise Abort(_('%s mismatch: expected %s, got %s') %
224 (k, v, self._digester[k]))
225 (k, v, self._digester[k]))
225
226
226 try:
227 try:
227 buffer = buffer
228 buffer = buffer
228 except NameError:
229 except NameError:
229 if sys.version_info[0] < 3:
230 if sys.version_info[0] < 3:
230 def buffer(sliceable, offset=0):
231 def buffer(sliceable, offset=0):
231 return sliceable[offset:]
232 return sliceable[offset:]
232 else:
233 else:
233 def buffer(sliceable, offset=0):
234 def buffer(sliceable, offset=0):
234 return memoryview(sliceable)[offset:]
235 return memoryview(sliceable)[offset:]
235
236
236 closefds = os.name == 'posix'
237 closefds = os.name == 'posix'
237
238
238 _chunksize = 4096
239 _chunksize = 4096
239
240
240 class bufferedinputpipe(object):
241 class bufferedinputpipe(object):
241 """a manually buffered input pipe
242 """a manually buffered input pipe
242
243
243 Python will not let us use buffered IO and lazy reading with 'polling' at
244 Python will not let us use buffered IO and lazy reading with 'polling' at
244 the same time. We cannot probe the buffer state and select will not detect
245 the same time. We cannot probe the buffer state and select will not detect
245 that data are ready to read if they are already buffered.
246 that data are ready to read if they are already buffered.
246
247
247 This class let us work around that by implementing its own buffering
248 This class let us work around that by implementing its own buffering
248 (allowing efficient readline) while offering a way to know if the buffer is
249 (allowing efficient readline) while offering a way to know if the buffer is
249 empty from the output (allowing collaboration of the buffer with polling).
250 empty from the output (allowing collaboration of the buffer with polling).
250
251
251 This class lives in the 'util' module because it makes use of the 'os'
252 This class lives in the 'util' module because it makes use of the 'os'
252 module from the python stdlib.
253 module from the python stdlib.
253 """
254 """
254
255
255 def __init__(self, input):
256 def __init__(self, input):
256 self._input = input
257 self._input = input
257 self._buffer = []
258 self._buffer = []
258 self._eof = False
259 self._eof = False
259 self._lenbuf = 0
260 self._lenbuf = 0
260
261
261 @property
262 @property
262 def hasbuffer(self):
263 def hasbuffer(self):
263 """True is any data is currently buffered
264 """True is any data is currently buffered
264
265
265 This will be used externally a pre-step for polling IO. If there is
266 This will be used externally a pre-step for polling IO. If there is
266 already data then no polling should be set in place."""
267 already data then no polling should be set in place."""
267 return bool(self._buffer)
268 return bool(self._buffer)
268
269
269 @property
270 @property
270 def closed(self):
271 def closed(self):
271 return self._input.closed
272 return self._input.closed
272
273
273 def fileno(self):
274 def fileno(self):
274 return self._input.fileno()
275 return self._input.fileno()
275
276
276 def close(self):
277 def close(self):
277 return self._input.close()
278 return self._input.close()
278
279
279 def read(self, size):
280 def read(self, size):
280 while (not self._eof) and (self._lenbuf < size):
281 while (not self._eof) and (self._lenbuf < size):
281 self._fillbuffer()
282 self._fillbuffer()
282 return self._frombuffer(size)
283 return self._frombuffer(size)
283
284
284 def readline(self, *args, **kwargs):
285 def readline(self, *args, **kwargs):
285 if 1 < len(self._buffer):
286 if 1 < len(self._buffer):
286 # this should not happen because both read and readline end with a
287 # this should not happen because both read and readline end with a
287 # _frombuffer call that collapse it.
288 # _frombuffer call that collapse it.
288 self._buffer = [''.join(self._buffer)]
289 self._buffer = [''.join(self._buffer)]
289 self._lenbuf = len(self._buffer[0])
290 self._lenbuf = len(self._buffer[0])
290 lfi = -1
291 lfi = -1
291 if self._buffer:
292 if self._buffer:
292 lfi = self._buffer[-1].find('\n')
293 lfi = self._buffer[-1].find('\n')
293 while (not self._eof) and lfi < 0:
294 while (not self._eof) and lfi < 0:
294 self._fillbuffer()
295 self._fillbuffer()
295 if self._buffer:
296 if self._buffer:
296 lfi = self._buffer[-1].find('\n')
297 lfi = self._buffer[-1].find('\n')
297 size = lfi + 1
298 size = lfi + 1
298 if lfi < 0: # end of file
299 if lfi < 0: # end of file
299 size = self._lenbuf
300 size = self._lenbuf
300 elif 1 < len(self._buffer):
301 elif 1 < len(self._buffer):
301 # we need to take previous chunks into account
302 # we need to take previous chunks into account
302 size += self._lenbuf - len(self._buffer[-1])
303 size += self._lenbuf - len(self._buffer[-1])
303 return self._frombuffer(size)
304 return self._frombuffer(size)
304
305
305 def _frombuffer(self, size):
306 def _frombuffer(self, size):
306 """return at most 'size' data from the buffer
307 """return at most 'size' data from the buffer
307
308
308 The data are removed from the buffer."""
309 The data are removed from the buffer."""
309 if size == 0 or not self._buffer:
310 if size == 0 or not self._buffer:
310 return ''
311 return ''
311 buf = self._buffer[0]
312 buf = self._buffer[0]
312 if 1 < len(self._buffer):
313 if 1 < len(self._buffer):
313 buf = ''.join(self._buffer)
314 buf = ''.join(self._buffer)
314
315
315 data = buf[:size]
316 data = buf[:size]
316 buf = buf[len(data):]
317 buf = buf[len(data):]
317 if buf:
318 if buf:
318 self._buffer = [buf]
319 self._buffer = [buf]
319 self._lenbuf = len(buf)
320 self._lenbuf = len(buf)
320 else:
321 else:
321 self._buffer = []
322 self._buffer = []
322 self._lenbuf = 0
323 self._lenbuf = 0
323 return data
324 return data
324
325
325 def _fillbuffer(self):
326 def _fillbuffer(self):
326 """read data to the buffer"""
327 """read data to the buffer"""
327 data = os.read(self._input.fileno(), _chunksize)
328 data = os.read(self._input.fileno(), _chunksize)
328 if not data:
329 if not data:
329 self._eof = True
330 self._eof = True
330 else:
331 else:
331 self._lenbuf += len(data)
332 self._lenbuf += len(data)
332 self._buffer.append(data)
333 self._buffer.append(data)
333
334
334 def popen2(cmd, env=None, newlines=False):
335 def popen2(cmd, env=None, newlines=False):
335 # Setting bufsize to -1 lets the system decide the buffer size.
336 # Setting bufsize to -1 lets the system decide the buffer size.
336 # The default for bufsize is 0, meaning unbuffered. This leads to
337 # The default for bufsize is 0, meaning unbuffered. This leads to
337 # poor performance on Mac OS X: http://bugs.python.org/issue4194
338 # poor performance on Mac OS X: http://bugs.python.org/issue4194
338 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
339 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
339 close_fds=closefds,
340 close_fds=closefds,
340 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
341 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
341 universal_newlines=newlines,
342 universal_newlines=newlines,
342 env=env)
343 env=env)
343 return p.stdin, p.stdout
344 return p.stdin, p.stdout
344
345
345 def popen3(cmd, env=None, newlines=False):
346 def popen3(cmd, env=None, newlines=False):
346 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
347 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
347 return stdin, stdout, stderr
348 return stdin, stdout, stderr
348
349
349 def popen4(cmd, env=None, newlines=False, bufsize=-1):
350 def popen4(cmd, env=None, newlines=False, bufsize=-1):
350 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
351 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
351 close_fds=closefds,
352 close_fds=closefds,
352 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
353 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
353 stderr=subprocess.PIPE,
354 stderr=subprocess.PIPE,
354 universal_newlines=newlines,
355 universal_newlines=newlines,
355 env=env)
356 env=env)
356 return p.stdin, p.stdout, p.stderr, p
357 return p.stdin, p.stdout, p.stderr, p
357
358
358 def version():
359 def version():
359 """Return version information if available."""
360 """Return version information if available."""
360 try:
361 try:
361 from . import __version__
362 from . import __version__
362 return __version__.version
363 return __version__.version
363 except ImportError:
364 except ImportError:
364 return 'unknown'
365 return 'unknown'
365
366
366 def versiontuple(v=None, n=4):
367 def versiontuple(v=None, n=4):
367 """Parses a Mercurial version string into an N-tuple.
368 """Parses a Mercurial version string into an N-tuple.
368
369
369 The version string to be parsed is specified with the ``v`` argument.
370 The version string to be parsed is specified with the ``v`` argument.
370 If it isn't defined, the current Mercurial version string will be parsed.
371 If it isn't defined, the current Mercurial version string will be parsed.
371
372
372 ``n`` can be 2, 3, or 4. Here is how some version strings map to
373 ``n`` can be 2, 3, or 4. Here is how some version strings map to
373 returned values:
374 returned values:
374
375
375 >>> v = '3.6.1+190-df9b73d2d444'
376 >>> v = '3.6.1+190-df9b73d2d444'
376 >>> versiontuple(v, 2)
377 >>> versiontuple(v, 2)
377 (3, 6)
378 (3, 6)
378 >>> versiontuple(v, 3)
379 >>> versiontuple(v, 3)
379 (3, 6, 1)
380 (3, 6, 1)
380 >>> versiontuple(v, 4)
381 >>> versiontuple(v, 4)
381 (3, 6, 1, '190-df9b73d2d444')
382 (3, 6, 1, '190-df9b73d2d444')
382
383
383 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
384 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
384 (3, 6, 1, '190-df9b73d2d444+20151118')
385 (3, 6, 1, '190-df9b73d2d444+20151118')
385
386
386 >>> v = '3.6'
387 >>> v = '3.6'
387 >>> versiontuple(v, 2)
388 >>> versiontuple(v, 2)
388 (3, 6)
389 (3, 6)
389 >>> versiontuple(v, 3)
390 >>> versiontuple(v, 3)
390 (3, 6, None)
391 (3, 6, None)
391 >>> versiontuple(v, 4)
392 >>> versiontuple(v, 4)
392 (3, 6, None, None)
393 (3, 6, None, None)
393 """
394 """
394 if not v:
395 if not v:
395 v = version()
396 v = version()
396 parts = v.split('+', 1)
397 parts = v.split('+', 1)
397 if len(parts) == 1:
398 if len(parts) == 1:
398 vparts, extra = parts[0], None
399 vparts, extra = parts[0], None
399 else:
400 else:
400 vparts, extra = parts
401 vparts, extra = parts
401
402
402 vints = []
403 vints = []
403 for i in vparts.split('.'):
404 for i in vparts.split('.'):
404 try:
405 try:
405 vints.append(int(i))
406 vints.append(int(i))
406 except ValueError:
407 except ValueError:
407 break
408 break
408 # (3, 6) -> (3, 6, None)
409 # (3, 6) -> (3, 6, None)
409 while len(vints) < 3:
410 while len(vints) < 3:
410 vints.append(None)
411 vints.append(None)
411
412
412 if n == 2:
413 if n == 2:
413 return (vints[0], vints[1])
414 return (vints[0], vints[1])
414 if n == 3:
415 if n == 3:
415 return (vints[0], vints[1], vints[2])
416 return (vints[0], vints[1], vints[2])
416 if n == 4:
417 if n == 4:
417 return (vints[0], vints[1], vints[2], extra)
418 return (vints[0], vints[1], vints[2], extra)
418
419
419 # used by parsedate
420 # used by parsedate
420 defaultdateformats = (
421 defaultdateformats = (
421 '%Y-%m-%d %H:%M:%S',
422 '%Y-%m-%d %H:%M:%S',
422 '%Y-%m-%d %I:%M:%S%p',
423 '%Y-%m-%d %I:%M:%S%p',
423 '%Y-%m-%d %H:%M',
424 '%Y-%m-%d %H:%M',
424 '%Y-%m-%d %I:%M%p',
425 '%Y-%m-%d %I:%M%p',
425 '%Y-%m-%d',
426 '%Y-%m-%d',
426 '%m-%d',
427 '%m-%d',
427 '%m/%d',
428 '%m/%d',
428 '%m/%d/%y',
429 '%m/%d/%y',
429 '%m/%d/%Y',
430 '%m/%d/%Y',
430 '%a %b %d %H:%M:%S %Y',
431 '%a %b %d %H:%M:%S %Y',
431 '%a %b %d %I:%M:%S%p %Y',
432 '%a %b %d %I:%M:%S%p %Y',
432 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
433 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
433 '%b %d %H:%M:%S %Y',
434 '%b %d %H:%M:%S %Y',
434 '%b %d %I:%M:%S%p %Y',
435 '%b %d %I:%M:%S%p %Y',
435 '%b %d %H:%M:%S',
436 '%b %d %H:%M:%S',
436 '%b %d %I:%M:%S%p',
437 '%b %d %I:%M:%S%p',
437 '%b %d %H:%M',
438 '%b %d %H:%M',
438 '%b %d %I:%M%p',
439 '%b %d %I:%M%p',
439 '%b %d %Y',
440 '%b %d %Y',
440 '%b %d',
441 '%b %d',
441 '%H:%M:%S',
442 '%H:%M:%S',
442 '%I:%M:%S%p',
443 '%I:%M:%S%p',
443 '%H:%M',
444 '%H:%M',
444 '%I:%M%p',
445 '%I:%M%p',
445 )
446 )
446
447
447 extendeddateformats = defaultdateformats + (
448 extendeddateformats = defaultdateformats + (
448 "%Y",
449 "%Y",
449 "%Y-%m",
450 "%Y-%m",
450 "%b",
451 "%b",
451 "%b %Y",
452 "%b %Y",
452 )
453 )
453
454
454 def cachefunc(func):
455 def cachefunc(func):
455 '''cache the result of function calls'''
456 '''cache the result of function calls'''
456 # XXX doesn't handle keywords args
457 # XXX doesn't handle keywords args
457 if func.__code__.co_argcount == 0:
458 if func.__code__.co_argcount == 0:
458 cache = []
459 cache = []
459 def f():
460 def f():
460 if len(cache) == 0:
461 if len(cache) == 0:
461 cache.append(func())
462 cache.append(func())
462 return cache[0]
463 return cache[0]
463 return f
464 return f
464 cache = {}
465 cache = {}
465 if func.__code__.co_argcount == 1:
466 if func.__code__.co_argcount == 1:
466 # we gain a small amount of time because
467 # we gain a small amount of time because
467 # we don't need to pack/unpack the list
468 # we don't need to pack/unpack the list
468 def f(arg):
469 def f(arg):
469 if arg not in cache:
470 if arg not in cache:
470 cache[arg] = func(arg)
471 cache[arg] = func(arg)
471 return cache[arg]
472 return cache[arg]
472 else:
473 else:
473 def f(*args):
474 def f(*args):
474 if args not in cache:
475 if args not in cache:
475 cache[args] = func(*args)
476 cache[args] = func(*args)
476 return cache[args]
477 return cache[args]
477
478
478 return f
479 return f
479
480
480 class sortdict(dict):
481 class sortdict(dict):
481 '''a simple sorted dictionary'''
482 '''a simple sorted dictionary'''
482 def __init__(self, data=None):
483 def __init__(self, data=None):
483 self._list = []
484 self._list = []
484 if data:
485 if data:
485 self.update(data)
486 self.update(data)
486 def copy(self):
487 def copy(self):
487 return sortdict(self)
488 return sortdict(self)
488 def __setitem__(self, key, val):
489 def __setitem__(self, key, val):
489 if key in self:
490 if key in self:
490 self._list.remove(key)
491 self._list.remove(key)
491 self._list.append(key)
492 self._list.append(key)
492 dict.__setitem__(self, key, val)
493 dict.__setitem__(self, key, val)
493 def __iter__(self):
494 def __iter__(self):
494 return self._list.__iter__()
495 return self._list.__iter__()
495 def update(self, src):
496 def update(self, src):
496 if isinstance(src, dict):
497 if isinstance(src, dict):
497 src = src.iteritems()
498 src = src.iteritems()
498 for k, v in src:
499 for k, v in src:
499 self[k] = v
500 self[k] = v
500 def clear(self):
501 def clear(self):
501 dict.clear(self)
502 dict.clear(self)
502 self._list = []
503 self._list = []
503 def items(self):
504 def items(self):
504 return [(k, self[k]) for k in self._list]
505 return [(k, self[k]) for k in self._list]
505 def __delitem__(self, key):
506 def __delitem__(self, key):
506 dict.__delitem__(self, key)
507 dict.__delitem__(self, key)
507 self._list.remove(key)
508 self._list.remove(key)
508 def pop(self, key, *args, **kwargs):
509 def pop(self, key, *args, **kwargs):
509 dict.pop(self, key, *args, **kwargs)
510 dict.pop(self, key, *args, **kwargs)
510 try:
511 try:
511 self._list.remove(key)
512 self._list.remove(key)
512 except ValueError:
513 except ValueError:
513 pass
514 pass
514 def keys(self):
515 def keys(self):
515 return self._list
516 return self._list
516 def iterkeys(self):
517 def iterkeys(self):
517 return self._list.__iter__()
518 return self._list.__iter__()
518 def iteritems(self):
519 def iteritems(self):
519 for k in self._list:
520 for k in self._list:
520 yield k, self[k]
521 yield k, self[k]
521 def insert(self, index, key, val):
522 def insert(self, index, key, val):
522 self._list.insert(index, key)
523 self._list.insert(index, key)
523 dict.__setitem__(self, key, val)
524 dict.__setitem__(self, key, val)
524
525
525 class _lrucachenode(object):
526 class _lrucachenode(object):
526 """A node in a doubly linked list.
527 """A node in a doubly linked list.
527
528
528 Holds a reference to nodes on either side as well as a key-value
529 Holds a reference to nodes on either side as well as a key-value
529 pair for the dictionary entry.
530 pair for the dictionary entry.
530 """
531 """
531 __slots__ = ('next', 'prev', 'key', 'value')
532 __slots__ = ('next', 'prev', 'key', 'value')
532
533
533 def __init__(self):
534 def __init__(self):
534 self.next = None
535 self.next = None
535 self.prev = None
536 self.prev = None
536
537
537 self.key = _notset
538 self.key = _notset
538 self.value = None
539 self.value = None
539
540
540 def markempty(self):
541 def markempty(self):
541 """Mark the node as emptied."""
542 """Mark the node as emptied."""
542 self.key = _notset
543 self.key = _notset
543
544
544 class lrucachedict(object):
545 class lrucachedict(object):
545 """Dict that caches most recent accesses and sets.
546 """Dict that caches most recent accesses and sets.
546
547
547 The dict consists of an actual backing dict - indexed by original
548 The dict consists of an actual backing dict - indexed by original
548 key - and a doubly linked circular list defining the order of entries in
549 key - and a doubly linked circular list defining the order of entries in
549 the cache.
550 the cache.
550
551
551 The head node is the newest entry in the cache. If the cache is full,
552 The head node is the newest entry in the cache. If the cache is full,
552 we recycle head.prev and make it the new head. Cache accesses result in
553 we recycle head.prev and make it the new head. Cache accesses result in
553 the node being moved to before the existing head and being marked as the
554 the node being moved to before the existing head and being marked as the
554 new head node.
555 new head node.
555 """
556 """
556 def __init__(self, max):
557 def __init__(self, max):
557 self._cache = {}
558 self._cache = {}
558
559
559 self._head = head = _lrucachenode()
560 self._head = head = _lrucachenode()
560 head.prev = head
561 head.prev = head
561 head.next = head
562 head.next = head
562 self._size = 1
563 self._size = 1
563 self._capacity = max
564 self._capacity = max
564
565
565 def __len__(self):
566 def __len__(self):
566 return len(self._cache)
567 return len(self._cache)
567
568
568 def __contains__(self, k):
569 def __contains__(self, k):
569 return k in self._cache
570 return k in self._cache
570
571
571 def __iter__(self):
572 def __iter__(self):
572 # We don't have to iterate in cache order, but why not.
573 # We don't have to iterate in cache order, but why not.
573 n = self._head
574 n = self._head
574 for i in range(len(self._cache)):
575 for i in range(len(self._cache)):
575 yield n.key
576 yield n.key
576 n = n.next
577 n = n.next
577
578
578 def __getitem__(self, k):
579 def __getitem__(self, k):
579 node = self._cache[k]
580 node = self._cache[k]
580 self._movetohead(node)
581 self._movetohead(node)
581 return node.value
582 return node.value
582
583
583 def __setitem__(self, k, v):
584 def __setitem__(self, k, v):
584 node = self._cache.get(k)
585 node = self._cache.get(k)
585 # Replace existing value and mark as newest.
586 # Replace existing value and mark as newest.
586 if node is not None:
587 if node is not None:
587 node.value = v
588 node.value = v
588 self._movetohead(node)
589 self._movetohead(node)
589 return
590 return
590
591
591 if self._size < self._capacity:
592 if self._size < self._capacity:
592 node = self._addcapacity()
593 node = self._addcapacity()
593 else:
594 else:
594 # Grab the last/oldest item.
595 # Grab the last/oldest item.
595 node = self._head.prev
596 node = self._head.prev
596
597
597 # At capacity. Kill the old entry.
598 # At capacity. Kill the old entry.
598 if node.key is not _notset:
599 if node.key is not _notset:
599 del self._cache[node.key]
600 del self._cache[node.key]
600
601
601 node.key = k
602 node.key = k
602 node.value = v
603 node.value = v
603 self._cache[k] = node
604 self._cache[k] = node
604 # And mark it as newest entry. No need to adjust order since it
605 # And mark it as newest entry. No need to adjust order since it
605 # is already self._head.prev.
606 # is already self._head.prev.
606 self._head = node
607 self._head = node
607
608
608 def __delitem__(self, k):
609 def __delitem__(self, k):
609 node = self._cache.pop(k)
610 node = self._cache.pop(k)
610 node.markempty()
611 node.markempty()
611
612
612 # Temporarily mark as newest item before re-adjusting head to make
613 # Temporarily mark as newest item before re-adjusting head to make
613 # this node the oldest item.
614 # this node the oldest item.
614 self._movetohead(node)
615 self._movetohead(node)
615 self._head = node.next
616 self._head = node.next
616
617
617 # Additional dict methods.
618 # Additional dict methods.
618
619
619 def get(self, k, default=None):
620 def get(self, k, default=None):
620 try:
621 try:
621 return self._cache[k]
622 return self._cache[k]
622 except KeyError:
623 except KeyError:
623 return default
624 return default
624
625
625 def clear(self):
626 def clear(self):
626 n = self._head
627 n = self._head
627 while n.key is not _notset:
628 while n.key is not _notset:
628 n.markempty()
629 n.markempty()
629 n = n.next
630 n = n.next
630
631
631 self._cache.clear()
632 self._cache.clear()
632
633
633 def copy(self):
634 def copy(self):
634 result = lrucachedict(self._capacity)
635 result = lrucachedict(self._capacity)
635 n = self._head.prev
636 n = self._head.prev
636 # Iterate in oldest-to-newest order, so the copy has the right ordering
637 # Iterate in oldest-to-newest order, so the copy has the right ordering
637 for i in range(len(self._cache)):
638 for i in range(len(self._cache)):
638 result[n.key] = n.value
639 result[n.key] = n.value
639 n = n.prev
640 n = n.prev
640 return result
641 return result
641
642
642 def _movetohead(self, node):
643 def _movetohead(self, node):
643 """Mark a node as the newest, making it the new head.
644 """Mark a node as the newest, making it the new head.
644
645
645 When a node is accessed, it becomes the freshest entry in the LRU
646 When a node is accessed, it becomes the freshest entry in the LRU
646 list, which is denoted by self._head.
647 list, which is denoted by self._head.
647
648
648 Visually, let's make ``N`` the new head node (* denotes head):
649 Visually, let's make ``N`` the new head node (* denotes head):
649
650
650 previous/oldest <-> head <-> next/next newest
651 previous/oldest <-> head <-> next/next newest
651
652
652 ----<->--- A* ---<->-----
653 ----<->--- A* ---<->-----
653 | |
654 | |
654 E <-> D <-> N <-> C <-> B
655 E <-> D <-> N <-> C <-> B
655
656
656 To:
657 To:
657
658
658 ----<->--- N* ---<->-----
659 ----<->--- N* ---<->-----
659 | |
660 | |
660 E <-> D <-> C <-> B <-> A
661 E <-> D <-> C <-> B <-> A
661
662
662 This requires the following moves:
663 This requires the following moves:
663
664
664 C.next = D (node.prev.next = node.next)
665 C.next = D (node.prev.next = node.next)
665 D.prev = C (node.next.prev = node.prev)
666 D.prev = C (node.next.prev = node.prev)
666 E.next = N (head.prev.next = node)
667 E.next = N (head.prev.next = node)
667 N.prev = E (node.prev = head.prev)
668 N.prev = E (node.prev = head.prev)
668 N.next = A (node.next = head)
669 N.next = A (node.next = head)
669 A.prev = N (head.prev = node)
670 A.prev = N (head.prev = node)
670 """
671 """
671 head = self._head
672 head = self._head
672 # C.next = D
673 # C.next = D
673 node.prev.next = node.next
674 node.prev.next = node.next
674 # D.prev = C
675 # D.prev = C
675 node.next.prev = node.prev
676 node.next.prev = node.prev
676 # N.prev = E
677 # N.prev = E
677 node.prev = head.prev
678 node.prev = head.prev
678 # N.next = A
679 # N.next = A
679 # It is tempting to do just "head" here, however if node is
680 # It is tempting to do just "head" here, however if node is
680 # adjacent to head, this will do bad things.
681 # adjacent to head, this will do bad things.
681 node.next = head.prev.next
682 node.next = head.prev.next
682 # E.next = N
683 # E.next = N
683 node.next.prev = node
684 node.next.prev = node
684 # A.prev = N
685 # A.prev = N
685 node.prev.next = node
686 node.prev.next = node
686
687
687 self._head = node
688 self._head = node
688
689
689 def _addcapacity(self):
690 def _addcapacity(self):
690 """Add a node to the circular linked list.
691 """Add a node to the circular linked list.
691
692
692 The new node is inserted before the head node.
693 The new node is inserted before the head node.
693 """
694 """
694 head = self._head
695 head = self._head
695 node = _lrucachenode()
696 node = _lrucachenode()
696 head.prev.next = node
697 head.prev.next = node
697 node.prev = head.prev
698 node.prev = head.prev
698 node.next = head
699 node.next = head
699 head.prev = node
700 head.prev = node
700 self._size += 1
701 self._size += 1
701 return node
702 return node
702
703
703 def lrucachefunc(func):
704 def lrucachefunc(func):
704 '''cache most recent results of function calls'''
705 '''cache most recent results of function calls'''
705 cache = {}
706 cache = {}
706 order = collections.deque()
707 order = collections.deque()
707 if func.__code__.co_argcount == 1:
708 if func.__code__.co_argcount == 1:
708 def f(arg):
709 def f(arg):
709 if arg not in cache:
710 if arg not in cache:
710 if len(cache) > 20:
711 if len(cache) > 20:
711 del cache[order.popleft()]
712 del cache[order.popleft()]
712 cache[arg] = func(arg)
713 cache[arg] = func(arg)
713 else:
714 else:
714 order.remove(arg)
715 order.remove(arg)
715 order.append(arg)
716 order.append(arg)
716 return cache[arg]
717 return cache[arg]
717 else:
718 else:
718 def f(*args):
719 def f(*args):
719 if args not in cache:
720 if args not in cache:
720 if len(cache) > 20:
721 if len(cache) > 20:
721 del cache[order.popleft()]
722 del cache[order.popleft()]
722 cache[args] = func(*args)
723 cache[args] = func(*args)
723 else:
724 else:
724 order.remove(args)
725 order.remove(args)
725 order.append(args)
726 order.append(args)
726 return cache[args]
727 return cache[args]
727
728
728 return f
729 return f
729
730
730 class propertycache(object):
731 class propertycache(object):
731 def __init__(self, func):
732 def __init__(self, func):
732 self.func = func
733 self.func = func
733 self.name = func.__name__
734 self.name = func.__name__
734 def __get__(self, obj, type=None):
735 def __get__(self, obj, type=None):
735 result = self.func(obj)
736 result = self.func(obj)
736 self.cachevalue(obj, result)
737 self.cachevalue(obj, result)
737 return result
738 return result
738
739
739 def cachevalue(self, obj, value):
740 def cachevalue(self, obj, value):
740 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
741 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
741 obj.__dict__[self.name] = value
742 obj.__dict__[self.name] = value
742
743
743 def pipefilter(s, cmd):
744 def pipefilter(s, cmd):
744 '''filter string S through command CMD, returning its output'''
745 '''filter string S through command CMD, returning its output'''
745 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
746 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
746 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
747 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
747 pout, perr = p.communicate(s)
748 pout, perr = p.communicate(s)
748 return pout
749 return pout
749
750
750 def tempfilter(s, cmd):
751 def tempfilter(s, cmd):
751 '''filter string S through a pair of temporary files with CMD.
752 '''filter string S through a pair of temporary files with CMD.
752 CMD is used as a template to create the real command to be run,
753 CMD is used as a template to create the real command to be run,
753 with the strings INFILE and OUTFILE replaced by the real names of
754 with the strings INFILE and OUTFILE replaced by the real names of
754 the temporary files generated.'''
755 the temporary files generated.'''
755 inname, outname = None, None
756 inname, outname = None, None
756 try:
757 try:
757 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
758 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
758 fp = os.fdopen(infd, 'wb')
759 fp = os.fdopen(infd, 'wb')
759 fp.write(s)
760 fp.write(s)
760 fp.close()
761 fp.close()
761 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
762 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
762 os.close(outfd)
763 os.close(outfd)
763 cmd = cmd.replace('INFILE', inname)
764 cmd = cmd.replace('INFILE', inname)
764 cmd = cmd.replace('OUTFILE', outname)
765 cmd = cmd.replace('OUTFILE', outname)
765 code = os.system(cmd)
766 code = os.system(cmd)
766 if sys.platform == 'OpenVMS' and code & 1:
767 if sys.platform == 'OpenVMS' and code & 1:
767 code = 0
768 code = 0
768 if code:
769 if code:
769 raise Abort(_("command '%s' failed: %s") %
770 raise Abort(_("command '%s' failed: %s") %
770 (cmd, explainexit(code)))
771 (cmd, explainexit(code)))
771 return readfile(outname)
772 return readfile(outname)
772 finally:
773 finally:
773 try:
774 try:
774 if inname:
775 if inname:
775 os.unlink(inname)
776 os.unlink(inname)
776 except OSError:
777 except OSError:
777 pass
778 pass
778 try:
779 try:
779 if outname:
780 if outname:
780 os.unlink(outname)
781 os.unlink(outname)
781 except OSError:
782 except OSError:
782 pass
783 pass
783
784
784 filtertable = {
785 filtertable = {
785 'tempfile:': tempfilter,
786 'tempfile:': tempfilter,
786 'pipe:': pipefilter,
787 'pipe:': pipefilter,
787 }
788 }
788
789
789 def filter(s, cmd):
790 def filter(s, cmd):
790 "filter a string through a command that transforms its input to its output"
791 "filter a string through a command that transforms its input to its output"
791 for name, fn in filtertable.iteritems():
792 for name, fn in filtertable.iteritems():
792 if cmd.startswith(name):
793 if cmd.startswith(name):
793 return fn(s, cmd[len(name):].lstrip())
794 return fn(s, cmd[len(name):].lstrip())
794 return pipefilter(s, cmd)
795 return pipefilter(s, cmd)
795
796
796 def binary(s):
797 def binary(s):
797 """return true if a string is binary data"""
798 """return true if a string is binary data"""
798 return bool(s and '\0' in s)
799 return bool(s and '\0' in s)
799
800
800 def increasingchunks(source, min=1024, max=65536):
801 def increasingchunks(source, min=1024, max=65536):
801 '''return no less than min bytes per chunk while data remains,
802 '''return no less than min bytes per chunk while data remains,
802 doubling min after each chunk until it reaches max'''
803 doubling min after each chunk until it reaches max'''
803 def log2(x):
804 def log2(x):
804 if not x:
805 if not x:
805 return 0
806 return 0
806 i = 0
807 i = 0
807 while x:
808 while x:
808 x >>= 1
809 x >>= 1
809 i += 1
810 i += 1
810 return i - 1
811 return i - 1
811
812
812 buf = []
813 buf = []
813 blen = 0
814 blen = 0
814 for chunk in source:
815 for chunk in source:
815 buf.append(chunk)
816 buf.append(chunk)
816 blen += len(chunk)
817 blen += len(chunk)
817 if blen >= min:
818 if blen >= min:
818 if min < max:
819 if min < max:
819 min = min << 1
820 min = min << 1
820 nmin = 1 << log2(blen)
821 nmin = 1 << log2(blen)
821 if nmin > min:
822 if nmin > min:
822 min = nmin
823 min = nmin
823 if min > max:
824 if min > max:
824 min = max
825 min = max
825 yield ''.join(buf)
826 yield ''.join(buf)
826 blen = 0
827 blen = 0
827 buf = []
828 buf = []
828 if buf:
829 if buf:
829 yield ''.join(buf)
830 yield ''.join(buf)
830
831
831 Abort = error.Abort
832 Abort = error.Abort
832
833
833 def always(fn):
834 def always(fn):
834 return True
835 return True
835
836
836 def never(fn):
837 def never(fn):
837 return False
838 return False
838
839
839 def nogc(func):
840 def nogc(func):
840 """disable garbage collector
841 """disable garbage collector
841
842
842 Python's garbage collector triggers a GC each time a certain number of
843 Python's garbage collector triggers a GC each time a certain number of
843 container objects (the number being defined by gc.get_threshold()) are
844 container objects (the number being defined by gc.get_threshold()) are
844 allocated even when marked not to be tracked by the collector. Tracking has
845 allocated even when marked not to be tracked by the collector. Tracking has
845 no effect on when GCs are triggered, only on what objects the GC looks
846 no effect on when GCs are triggered, only on what objects the GC looks
846 into. As a workaround, disable GC while building complex (huge)
847 into. As a workaround, disable GC while building complex (huge)
847 containers.
848 containers.
848
849
849 This garbage collector issue have been fixed in 2.7.
850 This garbage collector issue have been fixed in 2.7.
850 """
851 """
851 def wrapper(*args, **kwargs):
852 def wrapper(*args, **kwargs):
852 gcenabled = gc.isenabled()
853 gcenabled = gc.isenabled()
853 gc.disable()
854 gc.disable()
854 try:
855 try:
855 return func(*args, **kwargs)
856 return func(*args, **kwargs)
856 finally:
857 finally:
857 if gcenabled:
858 if gcenabled:
858 gc.enable()
859 gc.enable()
859 return wrapper
860 return wrapper
860
861
861 def pathto(root, n1, n2):
862 def pathto(root, n1, n2):
862 '''return the relative path from one place to another.
863 '''return the relative path from one place to another.
863 root should use os.sep to separate directories
864 root should use os.sep to separate directories
864 n1 should use os.sep to separate directories
865 n1 should use os.sep to separate directories
865 n2 should use "/" to separate directories
866 n2 should use "/" to separate directories
866 returns an os.sep-separated path.
867 returns an os.sep-separated path.
867
868
868 If n1 is a relative path, it's assumed it's
869 If n1 is a relative path, it's assumed it's
869 relative to root.
870 relative to root.
870 n2 should always be relative to root.
871 n2 should always be relative to root.
871 '''
872 '''
872 if not n1:
873 if not n1:
873 return localpath(n2)
874 return localpath(n2)
874 if os.path.isabs(n1):
875 if os.path.isabs(n1):
875 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
876 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
876 return os.path.join(root, localpath(n2))
877 return os.path.join(root, localpath(n2))
877 n2 = '/'.join((pconvert(root), n2))
878 n2 = '/'.join((pconvert(root), n2))
878 a, b = splitpath(n1), n2.split('/')
879 a, b = splitpath(n1), n2.split('/')
879 a.reverse()
880 a.reverse()
880 b.reverse()
881 b.reverse()
881 while a and b and a[-1] == b[-1]:
882 while a and b and a[-1] == b[-1]:
882 a.pop()
883 a.pop()
883 b.pop()
884 b.pop()
884 b.reverse()
885 b.reverse()
885 return os.sep.join((['..'] * len(a)) + b) or '.'
886 return os.sep.join((['..'] * len(a)) + b) or '.'
886
887
887 def mainfrozen():
888 def mainfrozen():
888 """return True if we are a frozen executable.
889 """return True if we are a frozen executable.
889
890
890 The code supports py2exe (most common, Windows only) and tools/freeze
891 The code supports py2exe (most common, Windows only) and tools/freeze
891 (portable, not much used).
892 (portable, not much used).
892 """
893 """
893 return (safehasattr(sys, "frozen") or # new py2exe
894 return (safehasattr(sys, "frozen") or # new py2exe
894 safehasattr(sys, "importers") or # old py2exe
895 safehasattr(sys, "importers") or # old py2exe
895 imp.is_frozen("__main__")) # tools/freeze
896 imp.is_frozen("__main__")) # tools/freeze
896
897
897 # the location of data files matching the source code
898 # the location of data files matching the source code
898 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
899 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
899 # executable version (py2exe) doesn't support __file__
900 # executable version (py2exe) doesn't support __file__
900 datapath = os.path.dirname(sys.executable)
901 datapath = os.path.dirname(sys.executable)
901 else:
902 else:
902 datapath = os.path.dirname(__file__)
903 datapath = os.path.dirname(__file__)
903
904
904 i18n.setdatapath(datapath)
905 i18n.setdatapath(datapath)
905
906
906 _hgexecutable = None
907 _hgexecutable = None
907
908
908 def hgexecutable():
909 def hgexecutable():
909 """return location of the 'hg' executable.
910 """return location of the 'hg' executable.
910
911
911 Defaults to $HG or 'hg' in the search path.
912 Defaults to $HG or 'hg' in the search path.
912 """
913 """
913 if _hgexecutable is None:
914 if _hgexecutable is None:
914 hg = os.environ.get('HG')
915 hg = os.environ.get('HG')
915 mainmod = sys.modules['__main__']
916 mainmod = sys.modules['__main__']
916 if hg:
917 if hg:
917 _sethgexecutable(hg)
918 _sethgexecutable(hg)
918 elif mainfrozen():
919 elif mainfrozen():
919 if getattr(sys, 'frozen', None) == 'macosx_app':
920 if getattr(sys, 'frozen', None) == 'macosx_app':
920 # Env variable set by py2app
921 # Env variable set by py2app
921 _sethgexecutable(os.environ['EXECUTABLEPATH'])
922 _sethgexecutable(os.environ['EXECUTABLEPATH'])
922 else:
923 else:
923 _sethgexecutable(sys.executable)
924 _sethgexecutable(sys.executable)
924 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
925 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
925 _sethgexecutable(mainmod.__file__)
926 _sethgexecutable(mainmod.__file__)
926 else:
927 else:
927 exe = findexe('hg') or os.path.basename(sys.argv[0])
928 exe = findexe('hg') or os.path.basename(sys.argv[0])
928 _sethgexecutable(exe)
929 _sethgexecutable(exe)
929 return _hgexecutable
930 return _hgexecutable
930
931
931 def _sethgexecutable(path):
932 def _sethgexecutable(path):
932 """set location of the 'hg' executable"""
933 """set location of the 'hg' executable"""
933 global _hgexecutable
934 global _hgexecutable
934 _hgexecutable = path
935 _hgexecutable = path
935
936
936 def _isstdout(f):
937 def _isstdout(f):
937 fileno = getattr(f, 'fileno', None)
938 fileno = getattr(f, 'fileno', None)
938 return fileno and fileno() == sys.__stdout__.fileno()
939 return fileno and fileno() == sys.__stdout__.fileno()
939
940
940 def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
941 def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
941 '''enhanced shell command execution.
942 '''enhanced shell command execution.
942 run with environment maybe modified, maybe in different dir.
943 run with environment maybe modified, maybe in different dir.
943
944
944 if command fails and onerr is None, return status, else raise onerr
945 if command fails and onerr is None, return status, else raise onerr
945 object as exception.
946 object as exception.
946
947
947 if out is specified, it is assumed to be a file-like object that has a
948 if out is specified, it is assumed to be a file-like object that has a
948 write() method. stdout and stderr will be redirected to out.'''
949 write() method. stdout and stderr will be redirected to out.'''
949 if environ is None:
950 if environ is None:
950 environ = {}
951 environ = {}
951 try:
952 try:
952 sys.stdout.flush()
953 sys.stdout.flush()
953 except Exception:
954 except Exception:
954 pass
955 pass
955 def py2shell(val):
956 def py2shell(val):
956 'convert python object into string that is useful to shell'
957 'convert python object into string that is useful to shell'
957 if val is None or val is False:
958 if val is None or val is False:
958 return '0'
959 return '0'
959 if val is True:
960 if val is True:
960 return '1'
961 return '1'
961 return str(val)
962 return str(val)
962 origcmd = cmd
963 origcmd = cmd
963 cmd = quotecommand(cmd)
964 cmd = quotecommand(cmd)
964 if sys.platform == 'plan9' and (sys.version_info[0] == 2
965 if sys.platform == 'plan9' and (sys.version_info[0] == 2
965 and sys.version_info[1] < 7):
966 and sys.version_info[1] < 7):
966 # subprocess kludge to work around issues in half-baked Python
967 # subprocess kludge to work around issues in half-baked Python
967 # ports, notably bichued/python:
968 # ports, notably bichued/python:
968 if not cwd is None:
969 if not cwd is None:
969 os.chdir(cwd)
970 os.chdir(cwd)
970 rc = os.system(cmd)
971 rc = os.system(cmd)
971 else:
972 else:
972 env = dict(os.environ)
973 env = dict(os.environ)
973 env.update((k, py2shell(v)) for k, v in environ.iteritems())
974 env.update((k, py2shell(v)) for k, v in environ.iteritems())
974 env['HG'] = hgexecutable()
975 env['HG'] = hgexecutable()
975 if out is None or _isstdout(out):
976 if out is None or _isstdout(out):
976 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
977 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
977 env=env, cwd=cwd)
978 env=env, cwd=cwd)
978 else:
979 else:
979 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
980 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
980 env=env, cwd=cwd, stdout=subprocess.PIPE,
981 env=env, cwd=cwd, stdout=subprocess.PIPE,
981 stderr=subprocess.STDOUT)
982 stderr=subprocess.STDOUT)
982 while True:
983 while True:
983 line = proc.stdout.readline()
984 line = proc.stdout.readline()
984 if not line:
985 if not line:
985 break
986 break
986 out.write(line)
987 out.write(line)
987 proc.wait()
988 proc.wait()
988 rc = proc.returncode
989 rc = proc.returncode
989 if sys.platform == 'OpenVMS' and rc & 1:
990 if sys.platform == 'OpenVMS' and rc & 1:
990 rc = 0
991 rc = 0
991 if rc and onerr:
992 if rc and onerr:
992 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
993 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
993 explainexit(rc)[0])
994 explainexit(rc)[0])
994 if errprefix:
995 if errprefix:
995 errmsg = '%s: %s' % (errprefix, errmsg)
996 errmsg = '%s: %s' % (errprefix, errmsg)
996 raise onerr(errmsg)
997 raise onerr(errmsg)
997 return rc
998 return rc
998
999
999 def checksignature(func):
1000 def checksignature(func):
1000 '''wrap a function with code to check for calling errors'''
1001 '''wrap a function with code to check for calling errors'''
1001 def check(*args, **kwargs):
1002 def check(*args, **kwargs):
1002 try:
1003 try:
1003 return func(*args, **kwargs)
1004 return func(*args, **kwargs)
1004 except TypeError:
1005 except TypeError:
1005 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1006 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1006 raise error.SignatureError
1007 raise error.SignatureError
1007 raise
1008 raise
1008
1009
1009 return check
1010 return check
1010
1011
1011 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1012 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1012 '''copy a file, preserving mode and optionally other stat info like
1013 '''copy a file, preserving mode and optionally other stat info like
1013 atime/mtime
1014 atime/mtime
1014
1015
1015 checkambig argument is used with filestat, and is useful only if
1016 checkambig argument is used with filestat, and is useful only if
1016 destination file is guarded by any lock (e.g. repo.lock or
1017 destination file is guarded by any lock (e.g. repo.lock or
1017 repo.wlock).
1018 repo.wlock).
1018
1019
1019 copystat and checkambig should be exclusive.
1020 copystat and checkambig should be exclusive.
1020 '''
1021 '''
1021 assert not (copystat and checkambig)
1022 assert not (copystat and checkambig)
1022 oldstat = None
1023 oldstat = None
1023 if os.path.lexists(dest):
1024 if os.path.lexists(dest):
1024 if checkambig:
1025 if checkambig:
1025 oldstat = checkambig and filestat(dest)
1026 oldstat = checkambig and filestat(dest)
1026 unlink(dest)
1027 unlink(dest)
1027 # hardlinks are problematic on CIFS, quietly ignore this flag
1028 # hardlinks are problematic on CIFS, quietly ignore this flag
1028 # until we find a way to work around it cleanly (issue4546)
1029 # until we find a way to work around it cleanly (issue4546)
1029 if False and hardlink:
1030 if False and hardlink:
1030 try:
1031 try:
1031 oslink(src, dest)
1032 oslink(src, dest)
1032 return
1033 return
1033 except (IOError, OSError):
1034 except (IOError, OSError):
1034 pass # fall back to normal copy
1035 pass # fall back to normal copy
1035 if os.path.islink(src):
1036 if os.path.islink(src):
1036 os.symlink(os.readlink(src), dest)
1037 os.symlink(os.readlink(src), dest)
1037 # copytime is ignored for symlinks, but in general copytime isn't needed
1038 # copytime is ignored for symlinks, but in general copytime isn't needed
1038 # for them anyway
1039 # for them anyway
1039 else:
1040 else:
1040 try:
1041 try:
1041 shutil.copyfile(src, dest)
1042 shutil.copyfile(src, dest)
1042 if copystat:
1043 if copystat:
1043 # copystat also copies mode
1044 # copystat also copies mode
1044 shutil.copystat(src, dest)
1045 shutil.copystat(src, dest)
1045 else:
1046 else:
1046 shutil.copymode(src, dest)
1047 shutil.copymode(src, dest)
1047 if oldstat and oldstat.stat:
1048 if oldstat and oldstat.stat:
1048 newstat = filestat(dest)
1049 newstat = filestat(dest)
1049 if newstat.isambig(oldstat):
1050 if newstat.isambig(oldstat):
1050 # stat of copied file is ambiguous to original one
1051 # stat of copied file is ambiguous to original one
1051 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1052 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1052 os.utime(dest, (advanced, advanced))
1053 os.utime(dest, (advanced, advanced))
1053 except shutil.Error as inst:
1054 except shutil.Error as inst:
1054 raise Abort(str(inst))
1055 raise Abort(str(inst))
1055
1056
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    If ``hardlink`` is None, hardlinking is attempted only when src and
    the directory containing dst are on the same device (hardlinks
    cannot cross filesystems).  The decision — possibly downgraded to
    False after the first failed link attempt — is threaded through
    recursive calls and returned to the caller.

    ``progress`` is called as progress(topic, count) after each file and
    finally as progress(topic, None) to close the topic.

    Returns a (hardlink, num) tuple: whether hardlinking was (still) in
    effect and how many files were processed.
    """
    num = 0

    if hardlink is None:
        # hardlinks only work within a single device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset nested progress by the files already done here
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # fall back to normal copy for this and all later files
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1092
1093
# Windows reserves a set of legacy device names (matched case-insensitively
# against the part of a path component before the first dot) and a set of
# characters that may never appear in filenames.  Used by checkwinfilename.
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component, accepting either separator
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                # control characters are forbidden in Windows filenames
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # device names are reserved regardless of extension (con.xml)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # trailing dot or space is rejected; note "n not in '..'" is a
        # substring test that conveniently lets "." and ".." through
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1143
1144
# On Windows the check above applies directly; elsewhere defer to the
# platform module's implementation.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1148
1149
def makelock(info, pathname):
    """Create a lock file at pathname whose content is info.

    A symlink with info as its target is preferred because its creation
    is atomic.  If the symlink already exists (EEXIST) the error is
    re-raised to signal that the lock is held; any other OSError (e.g. a
    filesystem refusing symlinks), or the absence of os.symlink
    altogether, falls back to an exclusively-created regular file.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    # fallback: exclusive create so a concurrent locker still fails
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
1161
1162
def readlock(pathname):
    """Return the info stored in the lock file at pathname.

    Mirrors makelock: first try to read it as a symlink target; EINVAL
    (not a symlink) and ENOSYS (symlinks unsupported), or a platform
    without os.readlink, fall back to reading a regular file.  Other
    OSErrors propagate.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
1174
1175
def fstat(fp):
    '''Stat a file object, even one that lacks a fileno() method.

    Prefers os.fstat() on the object's file descriptor; objects without
    fileno() are stat'ed by their .name attribute instead.
    '''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no descriptor available; fall back to stat by name
        return os.stat(fp.name)
    return os.fstat(fd)
1181
1182
1182 # File system features
1183 # File system features
1183
1184
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    # build a case-folded variant of the final component
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
    if b == b2:
        return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
        if s2 == s1:
            # the folded name resolves to the very same file, so the
            # filesystem folds case
            return False
        return True
    except OSError:
        # folded variant does not exist: case-sensitive
        return True
1206
1207
# Optional re2 engine support, detected lazily: _re2 stays None for
# "importable but not yet verified" (checked on first use by _re._checkre2)
# and is False when re2 is unavailable.
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1212
1213
class _re(object):
    """Facade over the re module that transparently prefers Google's re2
    engine when it is installed and the pattern/flags allow it.
    """
    def _checkre2(self):
        # decide once whether re2 actually works; cached in module-global
        # _re2 so the probe runs at most one time
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes its flags inline in the pattern, not as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern not supported by re2; fall through to re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton through which the rest of the code compiles regexps
re = _re()
1257
1258
# maps a directory path to {normcased entry name: on-disk entry name}
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # one listdir per directory, keyed by normcased name
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string, so the result must be kept;
    # otherwise a backslash separator would land unescaped inside the
    # regex character classes below.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs pass through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # unknown components are kept as given
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1300
1301
def checknlink(testfile):
    '''check whether hardlink count reporting works properly

    Creates a sibling temp file next to testfile, hardlinks it, and
    verifies that the link count is then reported as > 1.  Returns False
    on any failure (temp name taken, creation error, link error).  Both
    temp files are removed on the way out.
    '''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fd is not None:
            fd.close()
        # best-effort cleanup of both temp files
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1332
1333
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # preserve the original's truthy/falsy result: this is None when
    # there is no altsep, and a bool otherwise
    return os.altsep and path.endswith(os.altsep)
1336
1337
def splitpath(path):
    '''Split path on os.sep (and os.sep only).

    os.altsep is deliberately not considered: this is a named spelling
    of the plain "path.split(os.sep)". Run the path through
    os.path.normpath() first if alternate separators may be present.
    '''
    components = path.split(os.sep)
    return components
1344
1345
def gui():
    '''Are we running in a GUI?

    Returns a truthy value (on X11 this is the DISPLAY string itself,
    not necessarily True).
    '''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        # Windows always has a GUI; elsewhere rely on X11's DISPLAY
        return os.name == "nt" or os.environ.get("DISPLAY")
1359
1360
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    # the temp file lives in the same directory so a later rename over
    # the original stays on one filesystem
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # original is missing; the empty temp file stands in
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a partially-written temp file behind
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1398
1399
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # missing file is represented as stat = None
            self.stat = None

    # defining __eq__ would otherwise disable hashing
    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat.st_ctime == old.stat.st_ctime and
                    self.stat.st_mtime == old.stat.st_mtime)
        except AttributeError:
            # either side has stat = None (file missing): treated as
            # not equal
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime  < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime  < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime  > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime  > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat.st_ctime == old.stat.st_ctime)
        except AttributeError:
            # either side has no stat: cannot be ambiguous
            return False

    def __ne__(self, other):
        return not self == other
1464
1465
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        # writes go to a same-directory temp copy until close()
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        '''Flush and atomically rename the temp copy over the target.'''
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # only compute the pre-rename stat when ambiguity checking
            # was requested and the target already exists
            oldstat = self._checkambig and filestat(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        '''Throw away all writes: delete the temp copy, leave the
        original untouched.'''
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, discard if the with-block raised
        if exctype is not None:
            self.discard()
        else:
            self.close()
1527
1528
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.

    If ``mode`` is given, it is applied (via os.chmod) to each directory
    this call actually creates.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # a parent is missing: create it recursively, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1555
1556
def readfile(path):
    '''Return the entire binary contents of the file at path.'''
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1559
1560
def writefile(path, text):
    '''Replace the contents of the file at path with text (bytes).'''
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1563
1564
def appendfile(path, text):
    '''Append text (bytes) to the file at path, creating it if needed.'''
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1567
1568
1568 class chunkbuffer(object):
1569 class chunkbuffer(object):
1569 """Allow arbitrary sized chunks of data to be efficiently read from an
1570 """Allow arbitrary sized chunks of data to be efficiently read from an
1570 iterator over chunks of arbitrary size."""
1571 iterator over chunks of arbitrary size."""
1571
1572
    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.

        Chunks larger than 1MB (2**20 bytes) are pre-split into 256KB
        (2**18 byte) pieces so later reads never have to slice huge
        strings.
        """
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        # chunks buffered but not yet fully returned by read()
        self._queue = collections.deque()
        # bytes already consumed from the first queued chunk (used by read())
        self._chunkoffset = 0
1588
1589
1589 def read(self, l=None):
1590 def read(self, l=None):
1590 """Read L bytes of data from the iterator of chunks of data.
1591 """Read L bytes of data from the iterator of chunks of data.
1591 Returns less than L bytes if the iterator runs dry.
1592 Returns less than L bytes if the iterator runs dry.
1592
1593
1593 If size parameter is omitted, read everything"""
1594 If size parameter is omitted, read everything"""
1594 if l is None:
1595 if l is None:
1595 return ''.join(self.iter)
1596 return ''.join(self.iter)
1596
1597
1597 left = l
1598 left = l
1598 buf = []
1599 buf = []
1599 queue = self._queue
1600 queue = self._queue
1600 while left > 0:
1601 while left > 0:
1601 # refill the queue
1602 # refill the queue
1602 if not queue:
1603 if not queue:
1603 target = 2**18
1604 target = 2**18
1604 for chunk in self.iter:
1605 for chunk in self.iter:
1605 queue.append(chunk)
1606 queue.append(chunk)
1606 target -= len(chunk)
1607 target -= len(chunk)
1607 if target <= 0:
1608 if target <= 0:
1608 break
1609 break
1609 if not queue:
1610 if not queue:
1610 break
1611 break
1611
1612
1612 # The easy way to do this would be to queue.popleft(), modify the
1613 # The easy way to do this would be to queue.popleft(), modify the
1613 # chunk (if necessary), then queue.appendleft(). However, for cases
1614 # chunk (if necessary), then queue.appendleft(). However, for cases
1614 # where we read partial chunk content, this incurs 2 dequeue
1615 # where we read partial chunk content, this incurs 2 dequeue
1615 # mutations and creates a new str for the remaining chunk in the
1616 # mutations and creates a new str for the remaining chunk in the
1616 # queue. Our code below avoids this overhead.
1617 # queue. Our code below avoids this overhead.
1617
1618
1618 chunk = queue[0]
1619 chunk = queue[0]
1619 chunkl = len(chunk)
1620 chunkl = len(chunk)
1620 offset = self._chunkoffset
1621 offset = self._chunkoffset
1621
1622
1622 # Use full chunk.
1623 # Use full chunk.
1623 if offset == 0 and left >= chunkl:
1624 if offset == 0 and left >= chunkl:
1624 left -= chunkl
1625 left -= chunkl
1625 queue.popleft()
1626 queue.popleft()
1626 buf.append(chunk)
1627 buf.append(chunk)
1627 # self._chunkoffset remains at 0.
1628 # self._chunkoffset remains at 0.
1628 continue
1629 continue
1629
1630
1630 chunkremaining = chunkl - offset
1631 chunkremaining = chunkl - offset
1631
1632
1632 # Use all of unconsumed part of chunk.
1633 # Use all of unconsumed part of chunk.
1633 if left >= chunkremaining:
1634 if left >= chunkremaining:
1634 left -= chunkremaining
1635 left -= chunkremaining
1635 queue.popleft()
1636 queue.popleft()
1636 # offset == 0 is enabled by block above, so this won't merely
1637 # offset == 0 is enabled by block above, so this won't merely
1637 # copy via ``chunk[0:]``.
1638 # copy via ``chunk[0:]``.
1638 buf.append(chunk[offset:])
1639 buf.append(chunk[offset:])
1639 self._chunkoffset = 0
1640 self._chunkoffset = 0
1640
1641
1641 # Partial chunk needed.
1642 # Partial chunk needed.
1642 else:
1643 else:
1643 buf.append(chunk[offset:offset + left])
1644 buf.append(chunk[offset:offset + left])
1644 self._chunkoffset += left
1645 self._chunkoffset += left
1645 left -= chunkremaining
1646 left -= chunkremaining
1646
1647
1647 return ''.join(buf)
1648 return ''.join(buf)
1648
1649
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # never read past the remaining limit
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 short-circuits to a falsy value without touching f
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1669
1670
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    when = time.time() if timestamp is None else timestamp
    if when < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % when, hint=hint)
    # the local UTC offset is the gap between the naive UTC and naive
    # local renderings of the same instant
    utcthen = datetime.datetime.utcfromtimestamp(when)
    localthen = datetime.datetime.fromtimestamp(when)
    skew = utcthen - localthen
    return when, skew.days * 86400 + skew.seconds
1682
1683
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """Render a (unixtime, offset) tuple as a localized time string.

    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.  ``format`` is strftime syntax plus
    three extensions: %1 (signed hour part of the offset), %2 (minute
    part of the offset) and %z (shorthand for %1%2).

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    when, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand the offset placeholders before handing off to strftime;
        # offsets east of UTC are stored negative, hence the flipped sign
        sign = "-" if tz > 0 else "+"
        q, r = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    # clamp to signed 32 bits, matching the range parsedate() accepts
    d = min(max(when - tz, -0x80000000), 0x7fffffff)
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    stamp = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return stamp.strftime(format)
1718
1719
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    # a None date makes datestr() fall back to the current time
    return datestr(date, format='%Y-%m-%d')
1722
1723
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Accepts numeric offsets of the exact form [+-]HHMM (e.g. "+0530",
    "-0800") and the names "GMT"/"UTC".  Returns the offset in seconds
    west of UTC (so "+0530" yields -19800), or None if the string is not
    a recognized timezone.
    """
    # check the length before indexing tz[0]: the original order raised
    # IndexError on an empty string instead of returning None
    if len(tz) == 5 and tz[0] in "+-" and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        return -sign * (hours * 60 + minutes) * 60
    if tz == "GMT" or tz == "UTC":
        return 0
    return None
1733
1734
def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    ``defaults`` maps format-part names ("d", "mb", "yY", "HI", "M", "S")
    to a (rounded, today) pair of fallback strings; index 0 is used until
    a more specific part has been seen, index 1 afterwards.
    """
    # the old default was a mutable (and wrongly-typed) list literal;
    # use the None-sentinel idiom and an empty mapping instead
    if defaults is None:
        defaults = {}
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        # last token was a timezone: strip it before strptime parsing
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone: derive the offset from mktime vs timegm
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1763
1764
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    ``formats`` defaults to defaultdateformats; ``bias`` maps format-part
    names to preferred fallback values for unspecified fields.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # resolve the symbolic dates (also honoring their translations)
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: a raw "unixtime offset" pair
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses (for-else: none did)
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1840
1841
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    '-{days}' within the given number of days before now
    """

    def lowerbound(spec):
        # round unspecified fields down (January 1st, start of day)
        bias = {'mb': "1", 'd': "1"}
        return parsedate(spec, extendeddateformats, bias)[0]

    def upperbound(spec):
        # round unspecified fields up (December, 23:59:59); month lengths
        # vary, so try progressively shorter day counts until one parses
        bias = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                bias["d"] = days
                return parsedate(spec, extendeddateformats, bias)[0]
            except Abort:
                pass
        bias["d"] = "28"
        return parsedate(spec, extendeddateformats, bias)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    if date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upperbound(date[1:])
        return lambda x: x <= when
    if date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lowerbound(date[1:])
        return lambda x: x >= when
    if date[0] == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    if " to " in date:
        a, b = date.split(" to ")
        start, stop = lowerbound(a), upperbound(b)
        return lambda x: x >= start and x <= stop
    # a bare date matches anything inside its span of accuracy
    start, stop = lowerbound(date), upperbound(date)
    return lambda x: x >= start and x <= stop
1916
1917
def stringmatcher(pattern):
    """Parse a pattern that may carry a 're:' or 'literal:' prefix.

    Returns a (kind, pattern, matcher) triple: kind is 're' or 'literal',
    pattern has its prefix stripped, and matcher is a callable returning
    a truthy value on a match.  A missing or unknown prefix is treated as
    a literal match.

    >>> kind, pat, m = stringmatcher('literal:re:foobar')
    >>> (kind, pat, bool(m('re:foobar')))
    ('literal', 're:foobar', True)
    >>> kind, pat, m = stringmatcher('foo:bar')
    >>> (kind, pat, bool(m('foo:bar')))
    ('literal', 'foo:bar', True)
    """
    if pattern.startswith('re:'):
        regexpattern = pattern[3:]
        try:
            compiled = remod.compile(regexpattern)
        except remod.error as inst:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % inst)
        return 're', regexpattern, compiled.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__
1955
1956
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # strip any domain: "joe@example.com" -> "joe"
    user = user.split('@', 1)[0]
    # strip a leading real name: "Joe User <joe" -> "joe"
    before, bracket, after = user.partition('<')
    if bracket:
        user = after
    # keep the first word only
    user = user.split(' ', 1)[0]
    # and only the part before the first dot
    user = user.split('.', 1)[0]
    return user
1971
1972
def emailuser(user):
    """Return the user portion of an email address."""
    # drop the domain, then any "Real Name <" prefix
    user = user.split('@', 1)[0]
    before, bracket, after = user.partition('<')
    if bracket:
        user = after
    return user
1981
1982
def email(author):
    '''get email of author.'''
    # slice between '<' and '>'; when neither bracket exists this is
    # author[0:None], i.e. the whole string
    end = author.find('>')
    if end == -1:
        end = None
    start = author.find('<') + 1
    return author[start:end]
1988
1989
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # delegates to encoding.trim, which measures display columns rather
    # than bytes, appending '...' when the text is shortened
    return encoding.trim(text, maxlength, ellipsis='...')
1992
1993
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''
    # unittable rows are (multiplier, divisor, format), ordered from the
    # largest unit down; the last row doubles as the fallback format.
    def render(count):
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # smaller than every threshold: format the raw count
        return unittable[-1][2] % count
    return render
2003
2004
# Human-readable byte counts: pick the coarsest unit that still shows at
# least three significant digits (e.g. '100 GB', '10.0 GB', '1.00 GB').
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2016
2017
def uirepr(s):
    """repr() a value for display to the user.

    Collapses the doubled backslashes repr() produces, which would
    otherwise make Windows paths hard to read.
    """
    quoted = repr(s)
    return quoted.replace('\\\\', '\\')
2020
2021
2021 # delay import of textwrap
2022 # delay import of textwrap
2022 def MBTextWrapper(**kwargs):
2023 def MBTextWrapper(**kwargs):
2023 class tw(textwrap.TextWrapper):
2024 class tw(textwrap.TextWrapper):
2024 """
2025 """
2025 Extend TextWrapper for width-awareness.
2026 Extend TextWrapper for width-awareness.
2026
2027
2027 Neither number of 'bytes' in any encoding nor 'characters' is
2028 Neither number of 'bytes' in any encoding nor 'characters' is
2028 appropriate to calculate terminal columns for specified string.
2029 appropriate to calculate terminal columns for specified string.
2029
2030
2030 Original TextWrapper implementation uses built-in 'len()' directly,
2031 Original TextWrapper implementation uses built-in 'len()' directly,
2031 so overriding is needed to use width information of each characters.
2032 so overriding is needed to use width information of each characters.
2032
2033
2033 In addition, characters classified into 'ambiguous' width are
2034 In addition, characters classified into 'ambiguous' width are
2034 treated as wide in East Asian area, but as narrow in other.
2035 treated as wide in East Asian area, but as narrow in other.
2035
2036
2036 This requires use decision to determine width of such characters.
2037 This requires use decision to determine width of such characters.
2037 """
2038 """
2038 def _cutdown(self, ucstr, space_left):
2039 def _cutdown(self, ucstr, space_left):
2039 l = 0
2040 l = 0
2040 colwidth = encoding.ucolwidth
2041 colwidth = encoding.ucolwidth
2041 for i in xrange(len(ucstr)):
2042 for i in xrange(len(ucstr)):
2042 l += colwidth(ucstr[i])
2043 l += colwidth(ucstr[i])
2043 if space_left < l:
2044 if space_left < l:
2044 return (ucstr[:i], ucstr[i:])
2045 return (ucstr[:i], ucstr[i:])
2045 return ucstr, ''
2046 return ucstr, ''
2046
2047
2047 # overriding of base class
2048 # overriding of base class
2048 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
2049 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
2049 space_left = max(width - cur_len, 1)
2050 space_left = max(width - cur_len, 1)
2050
2051
2051 if self.break_long_words:
2052 if self.break_long_words:
2052 cut, res = self._cutdown(reversed_chunks[-1], space_left)
2053 cut, res = self._cutdown(reversed_chunks[-1], space_left)
2053 cur_line.append(cut)
2054 cur_line.append(cut)
2054 reversed_chunks[-1] = res
2055 reversed_chunks[-1] = res
2055 elif not cur_line:
2056 elif not cur_line:
2056 cur_line.append(reversed_chunks.pop())
2057 cur_line.append(reversed_chunks.pop())
2057
2058
    # this overriding code is imported from TextWrapper of Python 2.6
    # to calculate columns of string by 'encoding.ucolwidth()'
    def _wrap_chunks(self, chunks):
        """Greedily pack *chunks* into lines no wider than self.width.

        Identical to TextWrapper._wrap_chunks except that chunk widths
        are measured in display columns via encoding.ucolwidth() rather
        than len(), so East-Asian wide characters wrap correctly.
        Returns the list of assembled lines (without newlines).
        """
        colwidth = encoding.ucolwidth

        lines = []
        if self.width <= 0:
            raise ValueError("invalid width %r (must be > 0)" % self.width)

        # Arrange in reverse order so items can be efficiently popped
        # from a stack of chucks.
        chunks.reverse()

        while chunks:

            # Start the list of chunks that will make up the current line.
            # cur_len is just the length of all the chunks in cur_line.
            cur_line = []
            cur_len = 0

            # Figure out which static string will prefix this line.
            if lines:
                indent = self.subsequent_indent
            else:
                indent = self.initial_indent

            # Maximum width for this line.
            width = self.width - len(indent)

            # First chunk on line is whitespace -- drop it, unless this
            # is the very beginning of the text (i.e. no lines started yet).
            if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                del chunks[-1]

            while chunks:
                # display width, not character count
                l = colwidth(chunks[-1])

                # Can at least squeeze this chunk onto the current line.
                if cur_len + l <= width:
                    cur_line.append(chunks.pop())
                    cur_len += l

                # Nope, this line is full.
                else:
                    break

            # The current line is full, and the next chunk is too big to
            # fit on *any* line (not just this one).
            if chunks and colwidth(chunks[-1]) > width:
                self._handle_long_word(chunks, cur_line, cur_len, width)

            # If the last chunk on this line is all whitespace, drop it.
            if (self.drop_whitespace and
                cur_line and cur_line[-1].strip() == ''):
                del cur_line[-1]

            # Convert current line back to a string and store it in list
            # of all lines (return value).
            if cur_line:
                lines.append(indent + ''.join(cur_line))

        return lines
2120
2121
2121 global MBTextWrapper
2122 global MBTextWrapper
2122 MBTextWrapper = tw
2123 MBTextWrapper = tw
2123 return tw(**kwargs)
2124 return tw(**kwargs)
2124
2125
def wrap(line, width, initindent='', hangindent=''):
    """Wrap byte string *line* to *width* display columns.

    initindent prefixes the first output line, hangindent every
    subsequent one. The text is decoded with the local encoding,
    wrapped column-aware via MBTextWrapper, and re-encoded.
    """
    widest = max(len(hangindent), len(initindent))
    if width <= widest:
        # adjust for weird terminal size
        width = max(78, widest + 1)
    decode = lambda s: s.decode(encoding.encoding, encoding.encodingmode)
    uline = decode(line)
    uinit = decode(initindent)
    uhang = decode(hangindent)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=uinit,
                            subsequent_indent=uhang)
    return wrapper.fill(uline).encode(encoding.encoding)
2137
2138
def iterlines(iterator):
    """Yield individual text lines from an iterable of multi-line chunks."""
    for chunk in iterator:
        # splitlines() discards the line terminators
        lines = chunk.splitlines()
        for ln in lines:
            yield ln
2142
2143
def expandpath(path):
    """Expand environment variables, then '~user' constructs, in *path*."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2145
2146
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    # frozen binary: report the interpreter/executable directly
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2160
2161
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # NOTE(review): os.wait() returns a (pid, status) tuple, so the
        # 'pid in terminated' membership test below compares an int
        # against tuples and can never be true; termination is in
        # practice detected via testpid(pid) -- confirm intent.
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        # install a reaper while we poll; restored in the finally below
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn() to avoid a race where the child both
            # satisfied the condition and exited between iterations
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)  # poll at 100ms granularity
        return pid
    finally:
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2195
2196
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda text: text)
    alternatives = list(mapping.keys())
    if escape_prefix:
        # a doubled prefix stands for one literal prefix character
        alternatives.append(prefix)
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    pattern = remod.compile(r'%s(%s)' % (prefix, '|'.join(alternatives)))
    def replace(match):
        # strip the (single-character) prefix from the matched text
        return fn(mapping[match.group()[1:]])
    return pattern.sub(replace, s)
2220
2221
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: fall back to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2237
2238
# recognized spellings of boolean configuration values
_booleans = dict(
    [(_k, True) for _k in ('1', 'yes', 'true', 'on', 'always')] +
    [(_k, False) for _k in ('0', 'no', 'false', 'off', 'never')])

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2248
2249
2249 _hexdig = '0123456789ABCDEFabcdef'
2250 _hexdig = '0123456789ABCDEFabcdef'
2250 _hextochr = dict((a + b, chr(int(a + b, 16)))
2251 _hextochr = dict((a + b, chr(int(a + b, 16)))
2251 for a in _hexdig for b in _hexdig)
2252 for a in _hexdig for b in _hexdig)
2252
2253
2253 def _urlunquote(s):
2254 def _urlunquote(s):
2254 """Decode HTTP/HTML % encoding.
2255 """Decode HTTP/HTML % encoding.
2255
2256
2256 >>> _urlunquote('abc%20def')
2257 >>> _urlunquote('abc%20def')
2257 'abc def'
2258 'abc def'
2258 """
2259 """
2259 res = s.split('%')
2260 res = s.split('%')
2260 # fastpath
2261 # fastpath
2261 if len(res) == 1:
2262 if len(res) == 1:
2262 return s
2263 return s
2263 s = res[0]
2264 s = res[0]
2264 for item in res[1:]:
2265 for item in res[1:]:
2265 try:
2266 try:
2266 s += _hextochr[item[:2]] + item[2:]
2267 s += _hextochr[item[:2]] + item[2:]
2267 except KeyError:
2268 except KeyError:
2268 s += '%' + item
2269 s += '%' + item
2269 except UnicodeDecodeError:
2270 except UnicodeDecodeError:
2270 s += unichr(int(item[:2], 16)) + item[2:]
2271 s += unichr(int(item[:2], 16)) + item[2:]
2271 return s
2272 return s
2272
2273
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        # fragment is split off first so '#' in later components is safe
        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                # no scheme: the whole string is a local path
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                # rsplit so '@' may appear (escaped use) inside userinfo
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        # list only the components that were actually parsed
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 literals are emitted verbatim
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
            if self.port:
                s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        """Return (url-without-credentials, urllib2-authinfo-or-None)."""
        user, passwd = self.user, self.passwd
        try:
            # temporarily strip credentials to render the bare URL
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        """Report whether this URL denotes an absolute (non-joinable) target."""
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        """Return the local filesystem path this URL refers to."""
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2559
2560
def hasscheme(path):
    """Report whether *path* carries a URL scheme (e.g. 'http://...')."""
    parsed = url(path)
    return bool(parsed.scheme)
2562
2563
def hasdriveletter(path):
    """Report whether *path* starts with a Windows drive letter ('c:...').

    Preserves the original truthiness contract: a falsy path is returned
    as-is; otherwise a bool is returned.
    """
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2565
2566
def urllocalpath(path):
    """Return the local filesystem path for *path*, treating any '?' or '#'
    as part of the path rather than as query/fragment separators."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2568
2569
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # keep the user name visible, mask only the password
        parsed.passwd = '***'
    return str(parsed)
2575
2576
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2581
2582
2582 def isatty(fp):
2583 def isatty(fp):
2583 try:
2584 try:
2584 return fp.isatty()
2585 return fp.isatty()
2585 except AttributeError:
2586 except AttributeError:
2586 return False
2587 return False
2587
2588
2588 timecount = unitcountfn(
2589 timecount = unitcountfn(
2589 (1, 1e3, _('%.0f s')),
2590 (1, 1e3, _('%.0f s')),
2590 (100, 1, _('%.1f s')),
2591 (100, 1, _('%.1f s')),
2591 (10, 1, _('%.2f s')),
2592 (10, 1, _('%.2f s')),
2592 (1, 1, _('%.3f s')),
2593 (1, 1, _('%.3f s')),
2593 (100, 0.001, _('%.1f ms')),
2594 (100, 0.001, _('%.1f ms')),
2594 (10, 0.001, _('%.2f ms')),
2595 (10, 0.001, _('%.2f ms')),
2595 (1, 0.001, _('%.3f ms')),
2596 (1, 0.001, _('%.3f ms')),
2596 (100, 0.000001, _('%.1f us')),
2597 (100, 0.000001, _('%.1f us')),
2597 (10, 0.000001, _('%.2f us')),
2598 (10, 0.000001, _('%.2f us')),
2598 (1, 0.000001, _('%.3f us')),
2599 (1, 0.000001, _('%.3f us')),
2599 (100, 0.000000001, _('%.1f ns')),
2600 (100, 0.000000001, _('%.1f ns')),
2600 (10, 0.000000001, _('%.2f ns')),
2601 (10, 0.000000001, _('%.2f ns')),
2601 (1, 0.000000001, _('%.3f ns')),
2602 (1, 0.000000001, _('%.3f ns')),
2602 )
2603 )
2603
2604
2604 _timenesting = [0]
2605 _timenesting = [0]
2605
2606
2606 def timed(func):
2607 def timed(func):
2607 '''Report the execution time of a function call to stderr.
2608 '''Report the execution time of a function call to stderr.
2608
2609
2609 During development, use as a decorator when you need to measure
2610 During development, use as a decorator when you need to measure
2610 the cost of a function, e.g. as follows:
2611 the cost of a function, e.g. as follows:
2611
2612
2612 @util.timed
2613 @util.timed
2613 def foo(a, b, c):
2614 def foo(a, b, c):
2614 pass
2615 pass
2615 '''
2616 '''
2616
2617
2617 def wrapper(*args, **kwargs):
2618 def wrapper(*args, **kwargs):
2618 start = time.time()
2619 start = time.time()
2619 indent = 2
2620 indent = 2
2620 _timenesting[0] += indent
2621 _timenesting[0] += indent
2621 try:
2622 try:
2622 return func(*args, **kwargs)
2623 return func(*args, **kwargs)
2623 finally:
2624 finally:
2624 elapsed = time.time() - start
2625 elapsed = time.time() - start
2625 _timenesting[0] -= indent
2626 _timenesting[0] -= indent
2626 sys.stderr.write('%s%s: %s\n' %
2627 sys.stderr.write('%s%s: %s\n' %
2627 (' ' * _timenesting[0], func.__name__,
2628 (' ' * _timenesting[0], func.__name__,
2628 timecount(elapsed)))
2629 timecount(elapsed)))
2629 return wrapper
2630 return wrapper
2630
2631
2631 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2632 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2632 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2633 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2633
2634
2634 def sizetoint(s):
2635 def sizetoint(s):
2635 '''Convert a space specifier to a byte count.
2636 '''Convert a space specifier to a byte count.
2636
2637
2637 >>> sizetoint('30')
2638 >>> sizetoint('30')
2638 30
2639 30
2639 >>> sizetoint('2.2kb')
2640 >>> sizetoint('2.2kb')
2640 2252
2641 2252
2641 >>> sizetoint('6M')
2642 >>> sizetoint('6M')
2642 6291456
2643 6291456
2643 '''
2644 '''
2644 t = s.strip().lower()
2645 t = s.strip().lower()
2645 try:
2646 try:
2646 for k, u in _sizeunits:
2647 for k, u in _sizeunits:
2647 if t.endswith(k):
2648 if t.endswith(k):
2648 return int(float(t[:-len(k)]) * u)
2649 return int(float(t[:-len(k)]) * u)
2649 return int(t)
2650 return int(t)
2650 except ValueError:
2651 except ValueError:
2651 raise error.ParseError(_("couldn't parse size: %s") % s)
2652 raise error.ParseError(_("couldn't parse size: %s") % s)
2652
2653
2653 class hooks(object):
2654 class hooks(object):
2654 '''A collection of hook functions that can be used to extend a
2655 '''A collection of hook functions that can be used to extend a
2655 function's behavior. Hooks are called in lexicographic order,
2656 function's behavior. Hooks are called in lexicographic order,
2656 based on the names of their sources.'''
2657 based on the names of their sources.'''
2657
2658
2658 def __init__(self):
2659 def __init__(self):
2659 self._hooks = []
2660 self._hooks = []
2660
2661
2661 def add(self, source, hook):
2662 def add(self, source, hook):
2662 self._hooks.append((source, hook))
2663 self._hooks.append((source, hook))
2663
2664
2664 def __call__(self, *args):
2665 def __call__(self, *args):
2665 self._hooks.sort(key=lambda x: x[0])
2666 self._hooks.sort(key=lambda x: x[0])
2666 results = []
2667 results = []
2667 for source, hook in self._hooks:
2668 for source, hook in self._hooks:
2668 results.append(hook(*args))
2669 results.append(hook(*args))
2669 return results
2670 return results
2670
2671
2671 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2672 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2672 '''Yields lines for a nicely formatted stacktrace.
2673 '''Yields lines for a nicely formatted stacktrace.
2673 Skips the 'skip' last entries.
2674 Skips the 'skip' last entries.
2674 Each file+linenumber is formatted according to fileline.
2675 Each file+linenumber is formatted according to fileline.
2675 Each line is formatted according to line.
2676 Each line is formatted according to line.
2676 If line is None, it yields:
2677 If line is None, it yields:
2677 length of longest filepath+line number,
2678 length of longest filepath+line number,
2678 filepath+linenumber,
2679 filepath+linenumber,
2679 function
2680 function
2680
2681
2681 Not be used in production code but very convenient while developing.
2682 Not be used in production code but very convenient while developing.
2682 '''
2683 '''
2683 entries = [(fileline % (fn, ln), func)
2684 entries = [(fileline % (fn, ln), func)
2684 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2685 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2685 if entries:
2686 if entries:
2686 fnmax = max(len(entry[0]) for entry in entries)
2687 fnmax = max(len(entry[0]) for entry in entries)
2687 for fnln, func in entries:
2688 for fnln, func in entries:
2688 if line is None:
2689 if line is None:
2689 yield (fnmax, fnln, func)
2690 yield (fnmax, fnln, func)
2690 else:
2691 else:
2691 yield line % (fnmax, fnln, func)
2692 yield line % (fnmax, fnln, func)
2692
2693
2693 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2694 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2694 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2695 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2695 Skips the 'skip' last entries. By default it will flush stdout first.
2696 Skips the 'skip' last entries. By default it will flush stdout first.
2696 It can be used everywhere and intentionally does not require an ui object.
2697 It can be used everywhere and intentionally does not require an ui object.
2697 Not be used in production code but very convenient while developing.
2698 Not be used in production code but very convenient while developing.
2698 '''
2699 '''
2699 if otherf:
2700 if otherf:
2700 otherf.flush()
2701 otherf.flush()
2701 f.write('%s at:\n' % msg)
2702 f.write('%s at:\n' % msg)
2702 for line in getstackframes(skip + 1):
2703 for line in getstackframes(skip + 1):
2703 f.write(line)
2704 f.write(line)
2704 f.flush()
2705 f.flush()
2705
2706
2706 class dirs(object):
2707 class dirs(object):
2707 '''a multiset of directory names from a dirstate or manifest'''
2708 '''a multiset of directory names from a dirstate or manifest'''
2708
2709
2709 def __init__(self, map, skip=None):
2710 def __init__(self, map, skip=None):
2710 self._dirs = {}
2711 self._dirs = {}
2711 addpath = self.addpath
2712 addpath = self.addpath
2712 if safehasattr(map, 'iteritems') and skip is not None:
2713 if safehasattr(map, 'iteritems') and skip is not None:
2713 for f, s in map.iteritems():
2714 for f, s in map.iteritems():
2714 if s[0] != skip:
2715 if s[0] != skip:
2715 addpath(f)
2716 addpath(f)
2716 else:
2717 else:
2717 for f in map:
2718 for f in map:
2718 addpath(f)
2719 addpath(f)
2719
2720
2720 def addpath(self, path):
2721 def addpath(self, path):
2721 dirs = self._dirs
2722 dirs = self._dirs
2722 for base in finddirs(path):
2723 for base in finddirs(path):
2723 if base in dirs:
2724 if base in dirs:
2724 dirs[base] += 1
2725 dirs[base] += 1
2725 return
2726 return
2726 dirs[base] = 1
2727 dirs[base] = 1
2727
2728
2728 def delpath(self, path):
2729 def delpath(self, path):
2729 dirs = self._dirs
2730 dirs = self._dirs
2730 for base in finddirs(path):
2731 for base in finddirs(path):
2731 if dirs[base] > 1:
2732 if dirs[base] > 1:
2732 dirs[base] -= 1
2733 dirs[base] -= 1
2733 return
2734 return
2734 del dirs[base]
2735 del dirs[base]
2735
2736
2736 def __iter__(self):
2737 def __iter__(self):
2737 return self._dirs.iterkeys()
2738 return self._dirs.iterkeys()
2738
2739
2739 def __contains__(self, d):
2740 def __contains__(self, d):
2740 return d in self._dirs
2741 return d in self._dirs
2741
2742
2742 if safehasattr(parsers, 'dirs'):
2743 if safehasattr(parsers, 'dirs'):
2743 dirs = parsers.dirs
2744 dirs = parsers.dirs
2744
2745
2745 def finddirs(path):
2746 def finddirs(path):
2746 pos = path.rfind('/')
2747 pos = path.rfind('/')
2747 while pos != -1:
2748 while pos != -1:
2748 yield path[:pos]
2749 yield path[:pos]
2749 pos = path.rfind('/', 0, pos)
2750 pos = path.rfind('/', 0, pos)
2750
2751
2751 # compression utility
2752 # compression utility
2752
2753
2753 class nocompress(object):
2754 class nocompress(object):
2754 def compress(self, x):
2755 def compress(self, x):
2755 return x
2756 return x
2756 def flush(self):
2757 def flush(self):
2757 return ""
2758 return ""
2758
2759
2759 compressors = {
2760 compressors = {
2760 None: nocompress,
2761 None: nocompress,
2761 # lambda to prevent early import
2762 # lambda to prevent early import
2762 'BZ': lambda: bz2.BZ2Compressor(),
2763 'BZ': lambda: bz2.BZ2Compressor(),
2763 'GZ': lambda: zlib.compressobj(),
2764 'GZ': lambda: zlib.compressobj(),
2764 }
2765 }
2765 # also support the old form by courtesies
2766 # also support the old form by courtesies
2766 compressors['UN'] = compressors[None]
2767 compressors['UN'] = compressors[None]
2767
2768
2768 def _makedecompressor(decompcls):
2769 def _makedecompressor(decompcls):
2769 def generator(f):
2770 def generator(f):
2770 d = decompcls()
2771 d = decompcls()
2771 for chunk in filechunkiter(f):
2772 for chunk in filechunkiter(f):
2772 yield d.decompress(chunk)
2773 yield d.decompress(chunk)
2773 def func(fh):
2774 def func(fh):
2774 return chunkbuffer(generator(fh))
2775 return chunkbuffer(generator(fh))
2775 return func
2776 return func
2776
2777
2777 class ctxmanager(object):
2778 class ctxmanager(object):
2778 '''A context manager for use in 'with' blocks to allow multiple
2779 '''A context manager for use in 'with' blocks to allow multiple
2779 contexts to be entered at once. This is both safer and more
2780 contexts to be entered at once. This is both safer and more
2780 flexible than contextlib.nested.
2781 flexible than contextlib.nested.
2781
2782
2782 Once Mercurial supports Python 2.7+, this will become mostly
2783 Once Mercurial supports Python 2.7+, this will become mostly
2783 unnecessary.
2784 unnecessary.
2784 '''
2785 '''
2785
2786
2786 def __init__(self, *args):
2787 def __init__(self, *args):
2787 '''Accepts a list of no-argument functions that return context
2788 '''Accepts a list of no-argument functions that return context
2788 managers. These will be invoked at __call__ time.'''
2789 managers. These will be invoked at __call__ time.'''
2789 self._pending = args
2790 self._pending = args
2790 self._atexit = []
2791 self._atexit = []
2791
2792
2792 def __enter__(self):
2793 def __enter__(self):
2793 return self
2794 return self
2794
2795
2795 def enter(self):
2796 def enter(self):
2796 '''Create and enter context managers in the order in which they were
2797 '''Create and enter context managers in the order in which they were
2797 passed to the constructor.'''
2798 passed to the constructor.'''
2798 values = []
2799 values = []
2799 for func in self._pending:
2800 for func in self._pending:
2800 obj = func()
2801 obj = func()
2801 values.append(obj.__enter__())
2802 values.append(obj.__enter__())
2802 self._atexit.append(obj.__exit__)
2803 self._atexit.append(obj.__exit__)
2803 del self._pending
2804 del self._pending
2804 return values
2805 return values
2805
2806
2806 def atexit(self, func, *args, **kwargs):
2807 def atexit(self, func, *args, **kwargs):
2807 '''Add a function to call when this context manager exits. The
2808 '''Add a function to call when this context manager exits. The
2808 ordering of multiple atexit calls is unspecified, save that
2809 ordering of multiple atexit calls is unspecified, save that
2809 they will happen before any __exit__ functions.'''
2810 they will happen before any __exit__ functions.'''
2810 def wrapper(exc_type, exc_val, exc_tb):
2811 def wrapper(exc_type, exc_val, exc_tb):
2811 func(*args, **kwargs)
2812 func(*args, **kwargs)
2812 self._atexit.append(wrapper)
2813 self._atexit.append(wrapper)
2813 return func
2814 return func
2814
2815
2815 def __exit__(self, exc_type, exc_val, exc_tb):
2816 def __exit__(self, exc_type, exc_val, exc_tb):
2816 '''Context managers are exited in the reverse order from which
2817 '''Context managers are exited in the reverse order from which
2817 they were created.'''
2818 they were created.'''
2818 received = exc_type is not None
2819 received = exc_type is not None
2819 suppressed = False
2820 suppressed = False
2820 pending = None
2821 pending = None
2821 self._atexit.reverse()
2822 self._atexit.reverse()
2822 for exitfunc in self._atexit:
2823 for exitfunc in self._atexit:
2823 try:
2824 try:
2824 if exitfunc(exc_type, exc_val, exc_tb):
2825 if exitfunc(exc_type, exc_val, exc_tb):
2825 suppressed = True
2826 suppressed = True
2826 exc_type = None
2827 exc_type = None
2827 exc_val = None
2828 exc_val = None
2828 exc_tb = None
2829 exc_tb = None
2829 except BaseException:
2830 except BaseException:
2830 pending = sys.exc_info()
2831 pending = sys.exc_info()
2831 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2832 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2832 del self._atexit
2833 del self._atexit
2833 if pending:
2834 if pending:
2834 raise exc_val
2835 raise exc_val
2835 return received and suppressed
2836 return received and suppressed
2836
2837
2837 def _bz2():
2838 def _bz2():
2838 d = bz2.BZ2Decompressor()
2839 d = bz2.BZ2Decompressor()
2839 # Bzip2 stream start with BZ, but we stripped it.
2840 # Bzip2 stream start with BZ, but we stripped it.
2840 # we put it back for good measure.
2841 # we put it back for good measure.
2841 d.decompress('BZ')
2842 d.decompress('BZ')
2842 return d
2843 return d
2843
2844
2844 decompressors = {None: lambda fh: fh,
2845 decompressors = {None: lambda fh: fh,
2845 '_truncatedBZ': _makedecompressor(_bz2),
2846 '_truncatedBZ': _makedecompressor(_bz2),
2846 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2847 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2847 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2848 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2848 }
2849 }
2849 # also support the old form by courtesies
2850 # also support the old form by courtesies
2850 decompressors['UN'] = decompressors[None]
2851 decompressors['UN'] = decompressors[None]
2851
2852
2852 # convenient shortcut
2853 # convenient shortcut
2853 dst = debugstacktrace
2854 dst = debugstacktrace
@@ -1,151 +1,151 b''
1 #require test-repo
1 #require test-repo
2
2
3 $ . "$TESTDIR/helpers-testrepo.sh"
3 $ . "$TESTDIR/helpers-testrepo.sh"
4 $ cd "$TESTDIR"/..
4 $ cd "$TESTDIR"/..
5
5
6 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
6 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
7 hgext/fsmonitor/pywatchman/__init__.py not using absolute_import
7 hgext/fsmonitor/pywatchman/__init__.py not using absolute_import
8 hgext/fsmonitor/pywatchman/__init__.py requires print_function
8 hgext/fsmonitor/pywatchman/__init__.py requires print_function
9 hgext/fsmonitor/pywatchman/capabilities.py not using absolute_import
9 hgext/fsmonitor/pywatchman/capabilities.py not using absolute_import
10 hgext/fsmonitor/pywatchman/pybser.py not using absolute_import
10 hgext/fsmonitor/pywatchman/pybser.py not using absolute_import
11 hgext/highlight/__init__.py not using absolute_import
11 hgext/highlight/__init__.py not using absolute_import
12 hgext/highlight/highlight.py not using absolute_import
12 hgext/highlight/highlight.py not using absolute_import
13 hgext/share.py not using absolute_import
13 hgext/share.py not using absolute_import
14 hgext/win32text.py not using absolute_import
14 hgext/win32text.py not using absolute_import
15 i18n/check-translation.py not using absolute_import
15 i18n/check-translation.py not using absolute_import
16 i18n/polib.py not using absolute_import
16 i18n/polib.py not using absolute_import
17 setup.py not using absolute_import
17 setup.py not using absolute_import
18 tests/heredoctest.py requires print_function
18 tests/heredoctest.py requires print_function
19 tests/md5sum.py not using absolute_import
19 tests/md5sum.py not using absolute_import
20 tests/readlink.py not using absolute_import
20 tests/readlink.py not using absolute_import
21 tests/run-tests.py not using absolute_import
21 tests/run-tests.py not using absolute_import
22 tests/test-demandimport.py not using absolute_import
22 tests/test-demandimport.py not using absolute_import
23
23
24 #if py3exe
24 #if py3exe
25 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs $PYTHON3 contrib/check-py3-compat.py
25 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs $PYTHON3 contrib/check-py3-compat.py
26 doc/hgmanpage.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
26 doc/hgmanpage.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
27 hgext/automv.py: error importing module: <SyntaxError> invalid syntax (commands.py, line *) (line *) (glob)
27 hgext/automv.py: error importing module: <SyntaxError> invalid syntax (commands.py, line *) (line *) (glob)
28 hgext/blackbox.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
28 hgext/blackbox.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
29 hgext/bugzilla.py: error importing module: <ImportError> No module named 'urlparse' (line *) (glob)
29 hgext/bugzilla.py: error importing module: <ImportError> No module named 'xmlrpclib' (line *) (glob)
30 hgext/censor.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
30 hgext/censor.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
31 hgext/chgserver.py: error importing module: <ImportError> No module named 'SocketServer' (line *) (glob)
31 hgext/chgserver.py: error importing module: <ImportError> No module named 'SocketServer' (line *) (glob)
32 hgext/children.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
32 hgext/children.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
33 hgext/churn.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
33 hgext/churn.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
34 hgext/clonebundles.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
34 hgext/clonebundles.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
35 hgext/color.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
35 hgext/color.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
36 hgext/convert/bzr.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
36 hgext/convert/bzr.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
37 hgext/convert/convcmd.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
37 hgext/convert/convcmd.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
38 hgext/convert/cvs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
38 hgext/convert/cvs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
39 hgext/convert/cvsps.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
39 hgext/convert/cvsps.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
40 hgext/convert/darcs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
40 hgext/convert/darcs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
41 hgext/convert/filemap.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
41 hgext/convert/filemap.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
42 hgext/convert/git.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
42 hgext/convert/git.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
43 hgext/convert/gnuarch.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
43 hgext/convert/gnuarch.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
44 hgext/convert/hg.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
44 hgext/convert/hg.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
45 hgext/convert/monotone.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
45 hgext/convert/monotone.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
46 hgext/convert/p*.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
46 hgext/convert/p*.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
47 hgext/convert/subversion.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
47 hgext/convert/subversion.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
48 hgext/convert/transport.py: error importing module: <ImportError> No module named 'svn.client' (line *) (glob)
48 hgext/convert/transport.py: error importing module: <ImportError> No module named 'svn.client' (line *) (glob)
49 hgext/eol.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
49 hgext/eol.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
50 hgext/extdiff.py: error importing module: <SyntaxError> invalid syntax (archival.py, line *) (line *) (glob)
50 hgext/extdiff.py: error importing module: <SyntaxError> invalid syntax (archival.py, line *) (line *) (glob)
51 hgext/factotum.py: error importing: <ImportError> No module named 'rfc822' (error at __init__.py:*) (glob)
51 hgext/factotum.py: error importing: <ImportError> No module named 'rfc822' (error at __init__.py:*) (glob)
52 hgext/fetch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
52 hgext/fetch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
53 hgext/fsmonitor/watchmanclient.py: error importing module: <SystemError> Parent module 'hgext.fsmonitor' not loaded, cannot perform relative import (line *) (glob)
53 hgext/fsmonitor/watchmanclient.py: error importing module: <SystemError> Parent module 'hgext.fsmonitor' not loaded, cannot perform relative import (line *) (glob)
54 hgext/gpg.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
54 hgext/gpg.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
55 hgext/graphlog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
55 hgext/graphlog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
56 hgext/hgk.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
56 hgext/hgk.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
57 hgext/histedit.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
57 hgext/histedit.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
58 hgext/keyword.py: error importing: <ImportError> No module named 'BaseHTTPServer' (error at common.py:*) (glob)
58 hgext/keyword.py: error importing: <ImportError> No module named 'BaseHTTPServer' (error at common.py:*) (glob)
59 hgext/largefiles/basestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
59 hgext/largefiles/basestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
60 hgext/largefiles/lfcommands.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
60 hgext/largefiles/lfcommands.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
61 hgext/largefiles/lfutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
61 hgext/largefiles/lfutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
62 hgext/largefiles/localstore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
62 hgext/largefiles/localstore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
63 hgext/largefiles/overrides.py: error importing module: <SyntaxError> invalid syntax (archival.py, line *) (line *) (glob)
63 hgext/largefiles/overrides.py: error importing module: <SyntaxError> invalid syntax (archival.py, line *) (line *) (glob)
64 hgext/largefiles/proto.py: error importing: <ImportError> No module named 'httplib' (error at httppeer.py:*) (glob)
64 hgext/largefiles/proto.py: error importing: <ImportError> No module named 'httplib' (error at httppeer.py:*) (glob)
65 hgext/largefiles/remotestore.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at wireproto.py:*) (glob)
65 hgext/largefiles/remotestore.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at wireproto.py:*) (glob)
66 hgext/largefiles/reposetup.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
66 hgext/largefiles/reposetup.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
67 hgext/largefiles/storefactory.py: error importing: <SyntaxError> invalid syntax (bundle2.py, line *) (error at bundlerepo.py:*) (glob)
67 hgext/largefiles/storefactory.py: error importing: <SyntaxError> invalid syntax (bundle2.py, line *) (error at bundlerepo.py:*) (glob)
68 hgext/largefiles/uisetup.py: error importing: <ImportError> No module named 'BaseHTTPServer' (error at common.py:*) (glob)
68 hgext/largefiles/uisetup.py: error importing: <ImportError> No module named 'BaseHTTPServer' (error at common.py:*) (glob)
69 hgext/largefiles/wirestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
69 hgext/largefiles/wirestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
70 hgext/mq.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
70 hgext/mq.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
71 hgext/notify.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
71 hgext/notify.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
72 hgext/pager.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
72 hgext/pager.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
73 hgext/patchbomb.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
73 hgext/patchbomb.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
74 hgext/purge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
74 hgext/purge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
75 hgext/rebase.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
75 hgext/rebase.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
76 hgext/record.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
76 hgext/record.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
77 hgext/relink.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
77 hgext/relink.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
78 hgext/schemes.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
78 hgext/schemes.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
79 hgext/share.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
79 hgext/share.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
80 hgext/shelve.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
80 hgext/shelve.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
81 hgext/strip.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
81 hgext/strip.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
82 hgext/transplant.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
82 hgext/transplant.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
83 mercurial/archival.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
83 mercurial/archival.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
84 mercurial/branchmap.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
84 mercurial/branchmap.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
85 mercurial/bundle*.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
85 mercurial/bundle*.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
86 mercurial/bundlerepo.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
86 mercurial/bundlerepo.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
87 mercurial/changegroup.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
87 mercurial/changegroup.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
88 mercurial/changelog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
88 mercurial/changelog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
89 mercurial/cmdutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
89 mercurial/cmdutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
90 mercurial/commands.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
90 mercurial/commands.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
91 mercurial/commandserver.py: error importing module: <ImportError> No module named 'SocketServer' (line *) (glob)
91 mercurial/commandserver.py: error importing module: <ImportError> No module named 'SocketServer' (line *) (glob)
92 mercurial/context.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
92 mercurial/context.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
93 mercurial/copies.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
93 mercurial/copies.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
94 mercurial/crecord.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
94 mercurial/crecord.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
95 mercurial/dirstate.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
95 mercurial/dirstate.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
96 mercurial/discovery.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
96 mercurial/discovery.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
97 mercurial/dispatch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
97 mercurial/dispatch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
98 mercurial/exchange.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
98 mercurial/exchange.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
99 mercurial/extensions.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
99 mercurial/extensions.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
100 mercurial/filelog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
100 mercurial/filelog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
101 mercurial/filemerge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
101 mercurial/filemerge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
102 mercurial/fileset.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
102 mercurial/fileset.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
103 mercurial/formatter.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
103 mercurial/formatter.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
104 mercurial/graphmod.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
104 mercurial/graphmod.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
105 mercurial/help.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
105 mercurial/help.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
106 mercurial/hg.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
106 mercurial/hg.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
107 mercurial/hgweb/common.py: error importing module: <ImportError> No module named 'BaseHTTPServer' (line *) (glob)
107 mercurial/hgweb/common.py: error importing module: <ImportError> No module named 'BaseHTTPServer' (line *) (glob)
108 mercurial/hgweb/hgweb_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
108 mercurial/hgweb/hgweb_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
109 mercurial/hgweb/hgwebdir_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
109 mercurial/hgweb/hgwebdir_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
110 mercurial/hgweb/protocol.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
110 mercurial/hgweb/protocol.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
111 mercurial/hgweb/request.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
111 mercurial/hgweb/request.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
112 mercurial/hgweb/server.py: error importing module: <ImportError> No module named 'BaseHTTPServer' (line *) (glob)
112 mercurial/hgweb/server.py: error importing module: <ImportError> No module named 'BaseHTTPServer' (line *) (glob)
113 mercurial/hgweb/webcommands.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
113 mercurial/hgweb/webcommands.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
114 mercurial/hgweb/webutil.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
114 mercurial/hgweb/webutil.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
115 mercurial/hgweb/wsgicgi.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
115 mercurial/hgweb/wsgicgi.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
116 mercurial/hook.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
116 mercurial/hook.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
117 mercurial/httpconnection.py: error importing: <ImportError> No module named 'rfc822' (error at __init__.py:*) (glob)
117 mercurial/httpconnection.py: error importing: <ImportError> No module named 'rfc822' (error at __init__.py:*) (glob)
118 mercurial/httppeer.py: error importing module: <ImportError> No module named 'httplib' (line *) (glob)
118 mercurial/httppeer.py: error importing module: <ImportError> No module named 'httplib' (line *) (glob)
119 mercurial/keepalive.py: error importing module: <ImportError> No module named 'httplib' (line *) (glob)
119 mercurial/keepalive.py: error importing module: <ImportError> No module named 'httplib' (line *) (glob)
120 mercurial/localrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
120 mercurial/localrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
121 mercurial/mail.py: error importing module: <AttributeError> module 'email' has no attribute 'Header' (line *) (glob)
121 mercurial/mail.py: error importing module: <AttributeError> module 'email' has no attribute 'Header' (line *) (glob)
122 mercurial/manifest.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
122 mercurial/manifest.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
123 mercurial/merge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
123 mercurial/merge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
124 mercurial/namespaces.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
124 mercurial/namespaces.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
125 mercurial/patch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
125 mercurial/patch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
126 mercurial/pure/mpatch.py: error importing module: <ImportError> cannot import name 'pycompat' (line *) (glob)
126 mercurial/pure/mpatch.py: error importing module: <ImportError> cannot import name 'pycompat' (line *) (glob)
127 mercurial/pure/parsers.py: error importing module: <ImportError> No module named 'mercurial.pure.node' (line *) (glob)
127 mercurial/pure/parsers.py: error importing module: <ImportError> No module named 'mercurial.pure.node' (line *) (glob)
128 mercurial/repair.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
128 mercurial/repair.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
129 mercurial/revlog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
129 mercurial/revlog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
130 mercurial/revset.py: error importing module: <AttributeError> 'dict' object has no attribute 'iteritems' (line *) (glob)
130 mercurial/revset.py: error importing module: <AttributeError> 'dict' object has no attribute 'iteritems' (line *) (glob)
131 mercurial/scmutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
131 mercurial/scmutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
132 mercurial/scmwindows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
132 mercurial/scmwindows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
133 mercurial/simplemerge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
133 mercurial/simplemerge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
134 mercurial/sshpeer.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at wireproto.py:*) (glob)
134 mercurial/sshpeer.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at wireproto.py:*) (glob)
135 mercurial/sshserver.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
135 mercurial/sshserver.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
136 mercurial/statichttprepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
136 mercurial/statichttprepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
137 mercurial/store.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
137 mercurial/store.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
138 mercurial/streamclone.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
138 mercurial/streamclone.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
139 mercurial/subrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
139 mercurial/subrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
140 mercurial/templatefilters.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
140 mercurial/templatefilters.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
141 mercurial/templatekw.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
141 mercurial/templatekw.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
142 mercurial/templater.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
142 mercurial/templater.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
143 mercurial/ui.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
143 mercurial/ui.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
144 mercurial/unionrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
144 mercurial/unionrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
145 mercurial/url.py: error importing module: <ImportError> No module named 'httplib' (line *) (glob)
145 mercurial/url.py: error importing module: <ImportError> No module named 'httplib' (line *) (glob)
146 mercurial/verify.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
146 mercurial/verify.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
147 mercurial/win*.py: error importing module: <ImportError> No module named 'msvcrt' (line *) (glob)
147 mercurial/win*.py: error importing module: <ImportError> No module named 'msvcrt' (line *) (glob)
148 mercurial/windows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
148 mercurial/windows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
149 mercurial/wireproto.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
149 mercurial/wireproto.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
150
150
151 #endif
151 #endif
@@ -1,158 +1,161 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2
2
3 from __future__ import absolute_import, print_function
3 from __future__ import absolute_import, print_function
4
4
5 __doc__ = """Tiny HTTP Proxy.
5 __doc__ = """Tiny HTTP Proxy.
6
6
7 This module implements GET, HEAD, POST, PUT and DELETE methods
7 This module implements GET, HEAD, POST, PUT and DELETE methods
8 on BaseHTTPServer, and behaves as an HTTP proxy. The CONNECT
8 on BaseHTTPServer, and behaves as an HTTP proxy. The CONNECT
9 method is also implemented experimentally, but has not been
9 method is also implemented experimentally, but has not been
10 tested yet.
10 tested yet.
11
11
12 Any help will be greatly appreciated. SUZUKI Hisao
12 Any help will be greatly appreciated. SUZUKI Hisao
13 """
13 """
14
14
15 __version__ = "0.2.1"
15 __version__ = "0.2.1"
16
16
17 import BaseHTTPServer
17 import BaseHTTPServer
18 import SocketServer
18 import SocketServer
19 import os
19 import os
20 import select
20 import select
21 import socket
21 import socket
22 import sys
22 import sys
23 import urlparse
23
24 from mercurial import util
25
26 urlparse = util.urlparse
24
27
25 class ProxyHandler (BaseHTTPServer.BaseHTTPRequestHandler):
28 class ProxyHandler (BaseHTTPServer.BaseHTTPRequestHandler):
26 __base = BaseHTTPServer.BaseHTTPRequestHandler
29 __base = BaseHTTPServer.BaseHTTPRequestHandler
27 __base_handle = __base.handle
30 __base_handle = __base.handle
28
31
29 server_version = "TinyHTTPProxy/" + __version__
32 server_version = "TinyHTTPProxy/" + __version__
30 rbufsize = 0 # self.rfile Be unbuffered
33 rbufsize = 0 # self.rfile Be unbuffered
31
34
32 def handle(self):
35 def handle(self):
33 (ip, port) = self.client_address
36 (ip, port) = self.client_address
34 allowed = getattr(self, 'allowed_clients', None)
37 allowed = getattr(self, 'allowed_clients', None)
35 if allowed is not None and ip not in allowed:
38 if allowed is not None and ip not in allowed:
36 self.raw_requestline = self.rfile.readline()
39 self.raw_requestline = self.rfile.readline()
37 if self.parse_request():
40 if self.parse_request():
38 self.send_error(403)
41 self.send_error(403)
39 else:
42 else:
40 self.__base_handle()
43 self.__base_handle()
41
44
42 def log_request(self, code='-', size='-'):
45 def log_request(self, code='-', size='-'):
43 xheaders = [h for h in self.headers.items() if h[0].startswith('x-')]
46 xheaders = [h for h in self.headers.items() if h[0].startswith('x-')]
44 self.log_message('"%s" %s %s%s',
47 self.log_message('"%s" %s %s%s',
45 self.requestline, str(code), str(size),
48 self.requestline, str(code), str(size),
46 ''.join([' %s:%s' % h for h in sorted(xheaders)]))
49 ''.join([' %s:%s' % h for h in sorted(xheaders)]))
47
50
48 def _connect_to(self, netloc, soc):
51 def _connect_to(self, netloc, soc):
49 i = netloc.find(':')
52 i = netloc.find(':')
50 if i >= 0:
53 if i >= 0:
51 host_port = netloc[:i], int(netloc[i + 1:])
54 host_port = netloc[:i], int(netloc[i + 1:])
52 else:
55 else:
53 host_port = netloc, 80
56 host_port = netloc, 80
54 print("\t" "connect to %s:%d" % host_port)
57 print("\t" "connect to %s:%d" % host_port)
55 try: soc.connect(host_port)
58 try: soc.connect(host_port)
56 except socket.error as arg:
59 except socket.error as arg:
57 try: msg = arg[1]
60 try: msg = arg[1]
58 except (IndexError, TypeError): msg = arg
61 except (IndexError, TypeError): msg = arg
59 self.send_error(404, msg)
62 self.send_error(404, msg)
60 return 0
63 return 0
61 return 1
64 return 1
62
65
63 def do_CONNECT(self):
66 def do_CONNECT(self):
64 soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
67 soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
65 try:
68 try:
66 if self._connect_to(self.path, soc):
69 if self._connect_to(self.path, soc):
67 self.log_request(200)
70 self.log_request(200)
68 self.wfile.write(self.protocol_version +
71 self.wfile.write(self.protocol_version +
69 " 200 Connection established\r\n")
72 " 200 Connection established\r\n")
70 self.wfile.write("Proxy-agent: %s\r\n" % self.version_string())
73 self.wfile.write("Proxy-agent: %s\r\n" % self.version_string())
71 self.wfile.write("\r\n")
74 self.wfile.write("\r\n")
72 self._read_write(soc, 300)
75 self._read_write(soc, 300)
73 finally:
76 finally:
74 print("\t" "bye")
77 print("\t" "bye")
75 soc.close()
78 soc.close()
76 self.connection.close()
79 self.connection.close()
77
80
78 def do_GET(self):
81 def do_GET(self):
79 (scm, netloc, path, params, query, fragment) = urlparse.urlparse(
82 (scm, netloc, path, params, query, fragment) = urlparse.urlparse(
80 self.path, 'http')
83 self.path, 'http')
81 if scm != 'http' or fragment or not netloc:
84 if scm != 'http' or fragment or not netloc:
82 self.send_error(400, "bad url %s" % self.path)
85 self.send_error(400, "bad url %s" % self.path)
83 return
86 return
84 soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
87 soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
85 try:
88 try:
86 if self._connect_to(netloc, soc):
89 if self._connect_to(netloc, soc):
87 self.log_request()
90 self.log_request()
88 soc.send("%s %s %s\r\n" % (
91 soc.send("%s %s %s\r\n" % (
89 self.command,
92 self.command,
90 urlparse.urlunparse(('', '', path, params, query, '')),
93 urlparse.urlunparse(('', '', path, params, query, '')),
91 self.request_version))
94 self.request_version))
92 self.headers['Connection'] = 'close'
95 self.headers['Connection'] = 'close'
93 del self.headers['Proxy-Connection']
96 del self.headers['Proxy-Connection']
94 for key_val in self.headers.items():
97 for key_val in self.headers.items():
95 soc.send("%s: %s\r\n" % key_val)
98 soc.send("%s: %s\r\n" % key_val)
96 soc.send("\r\n")
99 soc.send("\r\n")
97 self._read_write(soc)
100 self._read_write(soc)
98 finally:
101 finally:
99 print("\t" "bye")
102 print("\t" "bye")
100 soc.close()
103 soc.close()
101 self.connection.close()
104 self.connection.close()
102
105
103 def _read_write(self, soc, max_idling=20):
106 def _read_write(self, soc, max_idling=20):
104 iw = [self.connection, soc]
107 iw = [self.connection, soc]
105 ow = []
108 ow = []
106 count = 0
109 count = 0
107 while True:
110 while True:
108 count += 1
111 count += 1
109 (ins, _, exs) = select.select(iw, ow, iw, 3)
112 (ins, _, exs) = select.select(iw, ow, iw, 3)
110 if exs:
113 if exs:
111 break
114 break
112 if ins:
115 if ins:
113 for i in ins:
116 for i in ins:
114 if i is soc:
117 if i is soc:
115 out = self.connection
118 out = self.connection
116 else:
119 else:
117 out = soc
120 out = soc
118 try:
121 try:
119 data = i.recv(8192)
122 data = i.recv(8192)
120 except socket.error:
123 except socket.error:
121 break
124 break
122 if data:
125 if data:
123 out.send(data)
126 out.send(data)
124 count = 0
127 count = 0
125 else:
128 else:
126 print("\t" "idle", count)
129 print("\t" "idle", count)
127 if count == max_idling:
130 if count == max_idling:
128 break
131 break
129
132
130 do_HEAD = do_GET
133 do_HEAD = do_GET
131 do_POST = do_GET
134 do_POST = do_GET
132 do_PUT = do_GET
135 do_PUT = do_GET
133 do_DELETE = do_GET
136 do_DELETE = do_GET
134
137
135 class ThreadingHTTPServer (SocketServer.ThreadingMixIn,
138 class ThreadingHTTPServer (SocketServer.ThreadingMixIn,
136 BaseHTTPServer.HTTPServer):
139 BaseHTTPServer.HTTPServer):
137 def __init__(self, *args, **kwargs):
140 def __init__(self, *args, **kwargs):
138 BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
141 BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
139 a = open("proxy.pid", "w")
142 a = open("proxy.pid", "w")
140 a.write(str(os.getpid()) + "\n")
143 a.write(str(os.getpid()) + "\n")
141 a.close()
144 a.close()
142
145
143 if __name__ == '__main__':
146 if __name__ == '__main__':
144 argv = sys.argv
147 argv = sys.argv
145 if argv[1:] and argv[1] in ('-h', '--help'):
148 if argv[1:] and argv[1] in ('-h', '--help'):
146 print(argv[0], "[port [allowed_client_name ...]]")
149 print(argv[0], "[port [allowed_client_name ...]]")
147 else:
150 else:
148 if argv[2:]:
151 if argv[2:]:
149 allowed = []
152 allowed = []
150 for name in argv[2:]:
153 for name in argv[2:]:
151 client = socket.gethostbyname(name)
154 client = socket.gethostbyname(name)
152 allowed.append(client)
155 allowed.append(client)
153 print("Accept: %s (%s)" % (client, name))
156 print("Accept: %s (%s)" % (client, name))
154 ProxyHandler.allowed_clients = allowed
157 ProxyHandler.allowed_clients = allowed
155 del argv[2:]
158 del argv[2:]
156 else:
159 else:
157 print("Any clients will be served...")
160 print("Any clients will be served...")
158 BaseHTTPServer.test(ProxyHandler, ThreadingHTTPServer)
161 BaseHTTPServer.test(ProxyHandler, ThreadingHTTPServer)
General Comments 0
You need to be logged in to leave comments. Login now