##// END OF EJS Templates
py3: conditionalize the urlparse import...
Pulkit Goyal -
r29431:80880ad3 default
parent child Browse files
Show More
@@ -1,927 +1,928 b''
1 1 # bugzilla.py - bugzilla integration for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 # Copyright 2011-4 Jim Hague <jim.hague@acm.org>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''hooks for integrating with the Bugzilla bug tracker
10 10
11 11 This hook extension adds comments on bugs in Bugzilla when changesets
12 12 that refer to bugs by Bugzilla ID are seen. The comment is formatted using
13 13 the Mercurial template mechanism.
14 14
15 15 The bug references can optionally include an update for Bugzilla of the
16 16 hours spent working on the bug. Bugs can also be marked fixed.
17 17
18 18 Three basic modes of access to Bugzilla are provided:
19 19
20 20 1. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later.
21 21
22 22 2. Check data via the Bugzilla XMLRPC interface and submit bug change
23 23 via email to Bugzilla email interface. Requires Bugzilla 3.4 or later.
24 24
25 25 3. Writing directly to the Bugzilla database. Only Bugzilla installations
26 26 using MySQL are supported. Requires Python MySQLdb.
27 27
28 28 Writing directly to the database is susceptible to schema changes, and
29 29 relies on a Bugzilla contrib script to send out bug change
30 30 notification emails. This script runs as the user running Mercurial,
31 31 must be run on the host with the Bugzilla install, and requires
32 32 permission to read Bugzilla configuration details and the necessary
33 33 MySQL user and password to have full access rights to the Bugzilla
34 34 database. For these reasons this access mode is now considered
35 35 deprecated, and will not be updated for new Bugzilla versions going
36 36 forward. Only adding comments is supported in this access mode.
37 37
38 38 Access via XMLRPC needs a Bugzilla username and password to be specified
39 39 in the configuration. Comments are added under that username. Since the
40 40 configuration must be readable by all Mercurial users, it is recommended
41 41 that the rights of that user are restricted in Bugzilla to the minimum
42 42 necessary to add comments. Marking bugs fixed requires Bugzilla 4.0 and later.
43 43
44 44 Access via XMLRPC/email uses XMLRPC to query Bugzilla, but sends
45 45 email to the Bugzilla email interface to submit comments to bugs.
46 46 The From: address in the email is set to the email address of the Mercurial
47 47 user, so the comment appears to come from the Mercurial user. In the event
48 48 that the Mercurial user email is not recognized by Bugzilla as a Bugzilla
49 49 user, the email associated with the Bugzilla username used to log into
50 50 Bugzilla is used instead as the source of the comment. Marking bugs fixed
51 51 works on all supported Bugzilla versions.
52 52
53 53 Configuration items common to all access modes:
54 54
55 55 bugzilla.version
56 56 The access type to use. Values recognized are:
57 57
58 58 :``xmlrpc``: Bugzilla XMLRPC interface.
59 59 :``xmlrpc+email``: Bugzilla XMLRPC and email interfaces.
60 60 :``3.0``: MySQL access, Bugzilla 3.0 and later.
61 61 :``2.18``: MySQL access, Bugzilla 2.18 and up to but not
62 62 including 3.0.
63 63 :``2.16``: MySQL access, Bugzilla 2.16 and up to but not
64 64 including 2.18.
65 65
66 66 bugzilla.regexp
67 67 Regular expression to match bug IDs for update in changeset commit message.
68 68 It must contain one "()" named group ``<ids>`` containing the bug
69 69 IDs separated by non-digit characters. It may also contain
70 70 a named group ``<hours>`` with a floating-point number giving the
71 71 hours worked on the bug. If no named groups are present, the first
72 72 "()" group is assumed to contain the bug IDs, and work time is not
73 73 updated. The default expression matches ``Bug 1234``, ``Bug no. 1234``,
74 74 ``Bug number 1234``, ``Bugs 1234,5678``, ``Bug 1234 and 5678`` and
75 75 variations thereof, followed by an hours number prefixed by ``h`` or
76 76 ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
77 77
78 78 bugzilla.fixregexp
79 79 Regular expression to match bug IDs for marking fixed in changeset
80 80 commit message. This must contain a "()" named group ``<ids>`` containing
81 81 the bug IDs separated by non-digit characters. It may also contain
82 82 a named group ``<hours>`` with a floating-point number giving the
83 83 hours worked on the bug. If no named groups are present, the first
84 84 "()" group is assumed to contain the bug IDs, and work time is not
85 85 updated. The default expression matches ``Fixes 1234``, ``Fixes bug 1234``,
86 86 ``Fixes bugs 1234,5678``, ``Fixes 1234 and 5678`` and
87 87 variations thereof, followed by an hours number prefixed by ``h`` or
88 88 ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
89 89
90 90 bugzilla.fixstatus
91 91 The status to set a bug to when marking fixed. Default ``RESOLVED``.
92 92
93 93 bugzilla.fixresolution
94 94 The resolution to set a bug to when marking fixed. Default ``FIXED``.
95 95
96 96 bugzilla.style
97 97 The style file to use when formatting comments.
98 98
99 99 bugzilla.template
100 100 Template to use when formatting comments. Overrides style if
101 101 specified. In addition to the usual Mercurial keywords, the
102 102 extension specifies:
103 103
104 104 :``{bug}``: The Bugzilla bug ID.
105 105 :``{root}``: The full pathname of the Mercurial repository.
106 106 :``{webroot}``: Stripped pathname of the Mercurial repository.
107 107 :``{hgweb}``: Base URL for browsing Mercurial repositories.
108 108
109 109 Default ``changeset {node|short} in repo {root} refers to bug
110 110 {bug}.\\ndetails:\\n\\t{desc|tabindent}``
111 111
112 112 bugzilla.strip
113 113 The number of path separator characters to strip from the front of
114 114 the Mercurial repository path (``{root}`` in templates) to produce
115 115 ``{webroot}``. For example, a repository with ``{root}``
116 116 ``/var/local/my-project`` with a strip of 2 gives a value for
117 117 ``{webroot}`` of ``my-project``. Default 0.
118 118
119 119 web.baseurl
120 120 Base URL for browsing Mercurial repositories. Referenced from
121 121 templates as ``{hgweb}``.
122 122
123 123 Configuration items common to XMLRPC+email and MySQL access modes:
124 124
125 125 bugzilla.usermap
126 126 Path of file containing Mercurial committer email to Bugzilla user email
127 127 mappings. If specified, the file should contain one mapping per
128 128 line::
129 129
130 130 committer = Bugzilla user
131 131
132 132 See also the ``[usermap]`` section.
133 133
134 134 The ``[usermap]`` section is used to specify mappings of Mercurial
135 135 committer email to Bugzilla user email. See also ``bugzilla.usermap``.
136 136 Contains entries of the form ``committer = Bugzilla user``.
137 137
138 138 XMLRPC access mode configuration:
139 139
140 140 bugzilla.bzurl
141 141 The base URL for the Bugzilla installation.
142 142 Default ``http://localhost/bugzilla``.
143 143
144 144 bugzilla.user
145 145 The username to use to log into Bugzilla via XMLRPC. Default
146 146 ``bugs``.
147 147
148 148 bugzilla.password
149 149 The password for Bugzilla login.
150 150
151 151 XMLRPC+email access mode uses the XMLRPC access mode configuration items,
152 152 and also:
153 153
154 154 bugzilla.bzemail
155 155 The Bugzilla email address.
156 156
157 157 In addition, the Mercurial email settings must be configured. See the
158 158 documentation in hgrc(5), sections ``[email]`` and ``[smtp]``.
159 159
160 160 MySQL access mode configuration:
161 161
162 162 bugzilla.host
163 163 Hostname of the MySQL server holding the Bugzilla database.
164 164 Default ``localhost``.
165 165
166 166 bugzilla.db
167 167 Name of the Bugzilla database in MySQL. Default ``bugs``.
168 168
169 169 bugzilla.user
170 170 Username to use to access MySQL server. Default ``bugs``.
171 171
172 172 bugzilla.password
173 173 Password to use to access MySQL server.
174 174
175 175 bugzilla.timeout
176 176 Database connection timeout (seconds). Default 5.
177 177
178 178 bugzilla.bzuser
179 179 Fallback Bugzilla user name to record comments with, if changeset
180 180 committer cannot be found as a Bugzilla user.
181 181
182 182 bugzilla.bzdir
183 183 Bugzilla install directory. Used by default notify. Default
184 184 ``/var/www/html/bugzilla``.
185 185
186 186 bugzilla.notify
187 187 The command to run to get Bugzilla to send bug change notification
188 188 emails. Substitutes from a map with 3 keys, ``bzdir``, ``id`` (bug
189 189 id) and ``user`` (committer bugzilla email). Default depends on
190 190 version; from 2.18 it is "cd %(bzdir)s && perl -T
191 191 contrib/sendbugmail.pl %(id)s %(user)s".
192 192
193 193 Activating the extension::
194 194
195 195 [extensions]
196 196 bugzilla =
197 197
198 198 [hooks]
199 199 # run bugzilla hook on every change pulled or pushed in here
200 200 incoming.bugzilla = python:hgext.bugzilla.hook
201 201
202 202 Example configurations:
203 203
204 204 XMLRPC example configuration. This uses the Bugzilla at
205 205 ``http://my-project.org/bugzilla``, logging in as user
206 206 ``bugmail@my-project.org`` with password ``plugh``. It is used with a
207 207 collection of Mercurial repositories in ``/var/local/hg/repos/``,
208 208 with a web interface at ``http://my-project.org/hg``. ::
209 209
210 210 [bugzilla]
211 211 bzurl=http://my-project.org/bugzilla
212 212 user=bugmail@my-project.org
213 213 password=plugh
214 214 version=xmlrpc
215 215 template=Changeset {node|short} in {root|basename}.
216 216 {hgweb}/{webroot}/rev/{node|short}\\n
217 217 {desc}\\n
218 218 strip=5
219 219
220 220 [web]
221 221 baseurl=http://my-project.org/hg
222 222
223 223 XMLRPC+email example configuration. This uses the Bugzilla at
224 224 ``http://my-project.org/bugzilla``, logging in as user
225 225 ``bugmail@my-project.org`` with password ``plugh``. It is used with a
226 226 collection of Mercurial repositories in ``/var/local/hg/repos/``,
227 227 with a web interface at ``http://my-project.org/hg``. Bug comments
228 228 are sent to the Bugzilla email address
229 229 ``bugzilla@my-project.org``. ::
230 230
231 231 [bugzilla]
232 232 bzurl=http://my-project.org/bugzilla
233 233 user=bugmail@my-project.org
234 234 password=plugh
235 235 version=xmlrpc+email
236 236 bzemail=bugzilla@my-project.org
237 237 template=Changeset {node|short} in {root|basename}.
238 238 {hgweb}/{webroot}/rev/{node|short}\\n
239 239 {desc}\\n
240 240 strip=5
241 241
242 242 [web]
243 243 baseurl=http://my-project.org/hg
244 244
245 245 [usermap]
246 246 user@emaildomain.com=user.name@bugzilladomain.com
247 247
248 248 MySQL example configuration. This has a local Bugzilla 3.2 installation
249 249 in ``/opt/bugzilla-3.2``. The MySQL database is on ``localhost``,
250 250 the Bugzilla database name is ``bugs`` and MySQL is
251 251 accessed with MySQL username ``bugs`` password ``XYZZY``. It is used
252 252 with a collection of Mercurial repositories in ``/var/local/hg/repos/``,
253 253 with a web interface at ``http://my-project.org/hg``. ::
254 254
255 255 [bugzilla]
256 256 host=localhost
257 257 password=XYZZY
258 258 version=3.0
259 259 bzuser=unknown@domain.com
260 260 bzdir=/opt/bugzilla-3.2
261 261 template=Changeset {node|short} in {root|basename}.
262 262 {hgweb}/{webroot}/rev/{node|short}\\n
263 263 {desc}\\n
264 264 strip=5
265 265
266 266 [web]
267 267 baseurl=http://my-project.org/hg
268 268
269 269 [usermap]
270 270 user@emaildomain.com=user.name@bugzilladomain.com
271 271
272 272 All the above add a comment to the Bugzilla bug record of the form::
273 273
274 274 Changeset 3b16791d6642 in repository-name.
275 275 http://my-project.org/hg/repository-name/rev/3b16791d6642
276 276
277 277 Changeset commit comment. Bug 1234.
278 278 '''
279 279
280 280 from __future__ import absolute_import
281 281
282 282 import re
283 283 import time
284 import urlparse
285 284 import xmlrpclib
286 285
287 286 from mercurial.i18n import _
288 287 from mercurial.node import short
289 288 from mercurial import (
290 289 cmdutil,
291 290 error,
292 291 mail,
293 292 util,
294 293 )
295 294
295 urlparse = util.urlparse
296
# Note for extension authors: ONLY specify testedwith = 'internal' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'internal'
301 302
class bzaccess(object):
    '''Base class for access to Bugzilla.'''

    def __init__(self, ui):
        self.ui = ui
        # Optional file mapping committer emails to Bugzilla users;
        # merged into the [usermap] config section when present.
        mapfile = self.ui.config('bugzilla', 'usermap')
        if mapfile:
            self.ui.readconfig(mapfile, sections=['usermap'])

    def map_committer(self, user):
        '''map name of committer to Bugzilla user name.'''
        wanted = user.lower()
        for committer, bzuser in self.ui.configitems('usermap'):
            if committer.lower() == wanted:
                return bzuser
        # No mapping configured for this committer: use the name as-is.
        return user

    # Interface to be provided by concrete access classes.
    #
    # 'bugs' is a dict keyed on bug id, where values are a dict holding
    # updates to bug state. Recognized dict keys are:
    #
    # 'hours': Value, float containing work hours to be updated.
    # 'fix': If key present, bug is to be marked fixed. Value ignored.

    def filter_real_bug_ids(self, bugs):
        '''remove bug IDs that do not exist in Bugzilla from bugs.'''

    def filter_cset_known_bug_ids(self, node, bugs):
        '''remove bug IDs where node occurs in comment text from bugs.'''

    def updatebug(self, bugid, newstate, text, committer):
        '''update the specified bug. Add comment text and set new states.

        If possible add the comment as being from the committer of
        the changeset. Otherwise use the default Bugzilla user.
        '''

    def notify(self, bugs, committer):
        '''Force sending of Bugzilla notification emails.

        Only required if the access method does not trigger notification
        emails automatically.
        '''
349 350
350 351 # Bugzilla via direct access to MySQL database.
class bzmysql(bzaccess):
    '''Support for direct MySQL access to Bugzilla.

    The earliest Bugzilla version this is tested with is version 2.16.

    If your Bugzilla is version 3.4 or above, you are strongly
    recommended to use the XMLRPC access method instead.
    '''

    @staticmethod
    def sql_buglist(ids):
        '''return SQL-friendly list of bug ids'''
        return '(%s)' % ','.join(str(i) for i in ids)

    # Module object for MySQLdb, filled in lazily by __init__ so the
    # extension loads even when the driver is absent.
    _MySQLdb = None

    def __init__(self, ui):
        try:
            import MySQLdb as mysql
            bzmysql._MySQLdb = mysql
        except ImportError as err:
            raise error.Abort(_('python mysql support not available: %s') % err)

        bzaccess.__init__(self, ui)

        # Connection parameters all come from the [bugzilla] section.
        dbhost = self.ui.config('bugzilla', 'host', 'localhost')
        dbuser = self.ui.config('bugzilla', 'user', 'bugs')
        dbpass = self.ui.config('bugzilla', 'password')
        dbname = self.ui.config('bugzilla', 'db', 'bugs')
        conntimeout = int(self.ui.config('bugzilla', 'timeout', 5))
        self.ui.note(_('connecting to %s:%s as %s, password %s\n') %
                     (dbhost, dbname, dbuser, '*' * len(dbpass)))
        self.conn = bzmysql._MySQLdb.connect(host=dbhost,
                                             user=dbuser, passwd=dbpass,
                                             db=dbname,
                                             connect_timeout=conntimeout)
        self.cursor = self.conn.cursor()
        self.longdesc_id = self.get_longdesc_id()
        # Cache of login name -> numeric Bugzilla user id.
        self.user_ids = {}
        self.default_notify = "cd %(bzdir)s && ./processmail %(id)s %(user)s"

    def run(self, *args, **kwargs):
        '''run a query.'''
        self.ui.note(_('query: %s %s\n') % (args, kwargs))
        try:
            self.cursor.execute(*args, **kwargs)
        except bzmysql._MySQLdb.MySQLError:
            self.ui.note(_('failed query: %s %s\n') % (args, kwargs))
            raise

    def get_longdesc_id(self):
        '''get identity of longdesc field'''
        self.run('select fieldid from fielddefs where name = "longdesc"')
        rows = self.cursor.fetchall()
        if len(rows) != 1:
            raise error.Abort(_('unknown database schema'))
        return rows[0][0]

    def filter_real_bug_ids(self, bugs):
        '''filter not-existing bugs from set.'''
        self.run('select bug_id from bugs where bug_id in %s' %
                 bzmysql.sql_buglist(bugs.keys()))
        existing = set(row[0] for row in self.cursor.fetchall())
        for bugid in list(bugs.keys()):
            if bugid not in existing:
                self.ui.status(_('bug %d does not exist\n') % bugid)
                del bugs[bugid]

    def filter_cset_known_bug_ids(self, node, bugs):
        '''filter bug ids that already refer to this changeset from set.'''
        self.run('''select bug_id from longdescs where
                    bug_id in %s and thetext like "%%%s%%"''' %
                 (bzmysql.sql_buglist(bugs.keys()), short(node)))
        for (knownid,) in self.cursor.fetchall():
            self.ui.status(_('bug %d already knows about changeset %s\n') %
                           (knownid, short(node)))
            del bugs[knownid]

    def notify(self, bugs, committer):
        '''tell bugzilla to send mail.'''
        self.ui.status(_('telling bugzilla to send mail:\n'))
        (user, userid) = self.get_bugzilla_user(committer)
        for bugid in bugs.keys():
            self.ui.status(_(' bug %s\n') % bugid)
            cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
            bzdir = self.ui.config('bugzilla', 'bzdir',
                                   '/var/www/html/bugzilla')
            try:
                # Old-style notify strings interpolate the bare bug id;
                # the new dict-keyed format makes this raise TypeError.
                cmd = cmdfmt % bugid
            except TypeError:
                cmd = cmdfmt % {'bzdir': bzdir, 'id': bugid, 'user': user}
            self.ui.note(_('running notify command %s\n') % cmd)
            fp = util.popen('(%s) 2>&1' % cmd)
            out = fp.read()
            ret = fp.close()
            if ret:
                self.ui.warn(out)
                raise error.Abort(_('bugzilla notify command %s') %
                                  util.explainexit(ret)[0])
        self.ui.status(_('done\n'))

    def get_user_id(self, user):
        '''look up numeric bugzilla user id.'''
        try:
            return self.user_ids[user]
        except KeyError:
            try:
                # Numeric strings are taken to be the id itself.
                userid = int(user)
            except ValueError:
                self.ui.note(_('looking up user %s\n') % user)
                self.run('''select userid from profiles
                            where login_name like %s''', user)
                rows = self.cursor.fetchall()
                if len(rows) != 1:
                    raise KeyError(user)
                userid = int(rows[0][0])
            self.user_ids[user] = userid
            return userid

    def get_bugzilla_user(self, committer):
        '''See if committer is a registered bugzilla user. Return
        bugzilla username and userid if so. If not, return default
        bugzilla username and userid.'''
        user = self.map_committer(committer)
        try:
            userid = self.get_user_id(user)
        except KeyError:
            try:
                defaultuser = self.ui.config('bugzilla', 'bzuser')
                if not defaultuser:
                    raise error.Abort(_('cannot find bugzilla user id for %s') %
                                      user)
                userid = self.get_user_id(defaultuser)
                user = defaultuser
            except KeyError:
                raise error.Abort(_('cannot find bugzilla user id for %s or %s')
                                  % (user, defaultuser))
        return (user, userid)

    def updatebug(self, bugid, newstate, text, committer):
        '''update bug state with comment text.

        Try adding comment as committer of changeset, otherwise as
        default bugzilla user.'''
        if newstate:
            # Direct MySQL access can only append comments.
            self.ui.warn(_("Bugzilla/MySQL cannot update bug state\n"))

        (user, userid) = self.get_bugzilla_user(committer)
        now = time.strftime('%Y-%m-%d %H:%M:%S')
        self.run('''insert into longdescs
                    (bug_id, who, bug_when, thetext)
                    values (%s, %s, %s, %s)''',
                 (bugid, userid, now, text))
        self.run('''insert into bugs_activity (bug_id, who, bug_when, fieldid)
                    values (%s, %s, %s, %s)''',
                 (bugid, userid, now, self.longdesc_id))
        self.conn.commit()
511 512
class bzmysql_2_18(bzmysql):
    '''support for bugzilla 2.18 series.'''

    def __init__(self, ui):
        bzmysql.__init__(self, ui)
        # 2.18 ships sendbugmail.pl instead of processmail.
        self.default_notify = ("cd %(bzdir)s && "
                               "perl -T contrib/sendbugmail.pl %(id)s %(user)s")
519 520
class bzmysql_3_0(bzmysql_2_18):
    '''support for bugzilla 3.0 series.'''

    def __init__(self, ui):
        bzmysql_2_18.__init__(self, ui)

    def get_longdesc_id(self):
        '''get identity of longdesc field'''
        # The column was renamed from fieldid to id in the 3.0 schema.
        self.run('select id from fielddefs where name = "longdesc"')
        rows = self.cursor.fetchall()
        if len(rows) != 1:
            raise error.Abort(_('unknown database schema'))
        return rows[0][0]
533 534
534 535 # Bugzilla via XMLRPC interface.
535 536
class cookietransportrequest(object):
    """A Transport request method that retains cookies over its lifetime.

    The regular xmlrpclib transports ignore cookies. Which causes
    a bit of a problem when you need a cookie-based login, as with
    the Bugzilla XMLRPC interface prior to 4.4.3.

    So this is a helper for defining a Transport which looks for
    cookies being set in responses and saves them to add to all future
    requests.
    """

    # Inspiration drawn from
    # http://blog.godson.in/2010/09/how-to-make-python-xmlrpclib-client.html
    # http://www.itkovian.net/base/transport-class-for-pythons-xml-rpc-lib/

    # Cookies harvested from responses, replayed on every later request.
    cookies = []

    def send_cookies(self, connection):
        if not self.cookies:
            return
        for cookie in self.cookies:
            connection.putheader("Cookie", cookie)

    def request(self, host, handler, request_body, verbose=0):
        self.verbose = verbose
        self.accept_gzip_encoding = False

        # issue XML-RPC request
        conn = self.make_connection(host)
        if verbose:
            conn.set_debuglevel(1)

        self.send_request(conn, handler, request_body)
        self.send_host(conn, host)
        self.send_cookies(conn)
        self.send_user_agent(conn)
        self.send_content(conn, request_body)

        # Python 2.4-2.6 hand back an HTTP(S) object here, 2.7 an
        # HTTP(S)Connection. The former wraps an underlying
        # HTTP(S)Connection, so fall back to that when needed.
        try:
            response = conn.getresponse()
        except AttributeError:
            response = conn._conn.getresponse()

        # Remember every cookie the server set.
        for line in response.msg.getallmatchingheaders("Set-Cookie"):
            value = line.split(": ", 1)[1]
            self.cookies.append(value.split(";", 1)[0])

        if response.status != 200:
            raise xmlrpclib.ProtocolError(host + handler, response.status,
                                          response.reason,
                                          response.msg.headers)

        payload = response.read()
        parser, unmarshaller = self.getparser()
        parser.feed(payload)
        parser.close()

        return unmarshaller.close()
599 600
600 601 # The explicit calls to the underlying xmlrpclib __init__() methods are
601 602 # necessary. The xmlrpclib.Transport classes are old-style classes, and
602 603 # it turns out their __init__() doesn't get called when doing multiple
603 604 # inheritance with a new-style class.
class cookietransport(cookietransportrequest, xmlrpclib.Transport):
    '''Cookie-retaining transport for plain http XML-RPC endpoints.'''
    def __init__(self, use_datetime=0):
        # Old-style base: __init__ must be invoked explicitly, and only
        # exists on newer xmlrpclib versions.
        if util.safehasattr(xmlrpclib.Transport, "__init__"):
            xmlrpclib.Transport.__init__(self, use_datetime)
608 609
class cookiesafetransport(cookietransportrequest, xmlrpclib.SafeTransport):
    '''Cookie-retaining transport for https XML-RPC endpoints.'''
    def __init__(self, use_datetime=0):
        # Mirrors cookietransport: probing Transport.__init__ is taken to
        # imply SafeTransport has one too on this xmlrpclib version.
        if util.safehasattr(xmlrpclib.Transport, "__init__"):
            xmlrpclib.SafeTransport.__init__(self, use_datetime)
613 614
class bzxmlrpc(bzaccess):
    """Support for access to Bugzilla via the Bugzilla XMLRPC API.

    Requires a minimum Bugzilla version 3.4.
    """

    def __init__(self, ui):
        bzaccess.__init__(self, ui)

        url = self.ui.config('bugzilla', 'bzurl',
                             'http://localhost/bugzilla/')
        url = url.rstrip("/") + "/xmlrpc.cgi"

        user = self.ui.config('bugzilla', 'user', 'bugs')
        passwd = self.ui.config('bugzilla', 'password')

        self.fixstatus = self.ui.config('bugzilla', 'fixstatus', 'RESOLVED')
        self.fixresolution = self.ui.config('bugzilla', 'fixresolution',
                                            'FIXED')

        self.bzproxy = xmlrpclib.ServerProxy(url, self.transport(url))
        vparts = self.bzproxy.Bugzilla.version()['version'].split('.')
        self.bzvermajor = int(vparts[0])
        self.bzverminor = int(vparts[1])
        login = self.bzproxy.User.login({'login': user, 'password': passwd,
                                         'restrict_login': True})
        # Login tokens only exist from Bugzilla 4.4.3 on.
        self.bztoken = login.get('token', '')

    def transport(self, uri):
        '''pick a cookie-aware transport matching the URI scheme.'''
        if urlparse.urlparse(uri, "http")[0] == "https":
            return cookiesafetransport()
        return cookietransport()

    def get_bug_comments(self, id):
        """Return a string with all comment text for a bug."""
        c = self.bzproxy.Bug.comments({'ids': [id],
                                       'include_fields': ['text'],
                                       'token': self.bztoken})
        return ''.join(t['text'] for t in c['bugs'][str(id)]['comments'])

    def filter_real_bug_ids(self, bugs):
        probe = self.bzproxy.Bug.get({'ids': sorted(bugs.keys()),
                                      'include_fields': [],
                                      'permissive': True,
                                      'token': self.bztoken,
                                      })
        for fault in probe['faults']:
            badid = fault['id']
            self.ui.status(_('bug %d does not exist\n') % badid)
            del bugs[badid]

    def filter_cset_known_bug_ids(self, node, bugs):
        for bugid in sorted(bugs.keys()):
            if short(node) in self.get_bug_comments(bugid):
                self.ui.status(_('bug %d already knows about changeset %s\n') %
                               (bugid, short(node)))
                del bugs[bugid]

    def updatebug(self, bugid, newstate, text, committer):
        params = {}
        if 'hours' in newstate:
            params['work_time'] = newstate['hours']

        if self.bzvermajor >= 4:
            params['ids'] = [bugid]
            params['comment'] = {'body' : text}
            if 'fix' in newstate:
                params['status'] = self.fixstatus
                params['resolution'] = self.fixresolution
            params['token'] = self.bztoken
            self.bzproxy.Bug.update(params)
        else:
            if 'fix' in newstate:
                # Bug.update only exists from Bugzilla 4.0 onwards.
                self.ui.warn(_("Bugzilla/XMLRPC needs Bugzilla 4.0 or later "
                               "to mark bugs fixed\n"))
            params['id'] = bugid
            params['comment'] = text
            self.bzproxy.Bug.add_comment(params)
693 694
class bzxmlrpcemail(bzxmlrpc):
    """Read data from Bugzilla via XMLRPC, send updates via email.

    Advantages of sending updates via email:
    1. Comments can be added as any user, not just logged in user.
    2. Bug statuses or other fields not accessible via XMLRPC can
    potentially be updated.

    There is no XMLRPC function to change bug status before Bugzilla
    4.0, so bugs cannot be marked fixed via XMLRPC before Bugzilla 4.0.
    But bugs can be marked fixed via email from 3.4 onwards.
    """

    # The email interface changes subtly between 3.4 and 3.6. In 3.4,
    # in-email fields are specified as '@<fieldname> = <value>'. In
    # 3.6 this becomes '@<fieldname> <value>'. And fieldname @bug_id
    # in 3.4 becomes @id in 3.6. 3.6 and 4.0 both maintain backwards
    # compatibility, but rather than rely on this use the new format for
    # 4.0 onwards.

    def __init__(self, ui):
        bzxmlrpc.__init__(self, ui)

        self.bzemail = self.ui.config('bugzilla', 'bzemail')
        if not self.bzemail:
            raise error.Abort(_("configuration 'bzemail' missing"))
        mail.validateconfig(self.ui)

    def makecommandline(self, fieldname, value):
        '''format one in-email field command for this Bugzilla version.'''
        if self.bzvermajor >= 4:
            return "@%s %s" % (fieldname, str(value))
        if fieldname == "id":
            fieldname = "bug_id"
        return "@%s = %s" % (fieldname, str(value))

    def send_bug_modify_email(self, bugid, commands, comment, committer):
        '''send modification message to Bugzilla bug via email.

        The message format is documented in the Bugzilla email_in.pl
        specification. commands is a list of command lines, comment is the
        comment text.

        To stop users from crafting commit comments with
        Bugzilla commands, specify the bug ID via the message body, rather
        than the subject line, and leave a blank line after it.
        '''
        user = self.map_committer(committer)
        found = self.bzproxy.User.get({'match': [user],
                                       'token': self.bztoken})
        if not found['users']:
            # Committer is unknown to Bugzilla: fall back to the
            # configured login user's address.
            user = self.ui.config('bugzilla', 'user', 'bugs')
            found = self.bzproxy.User.get({'match': [user],
                                           'token': self.bztoken})
            if not found['users']:
                raise error.Abort(_("default bugzilla user %s email not found")
                                  % user)
        user = found['users'][0]['email']
        commands.append(self.makecommandline("id", bugid))

        body = "\n".join(commands) + "\n\n" + comment

        charsets = mail._charsets(self.ui)
        user = mail.addressencode(self.ui, user, charsets)
        bzemail = mail.addressencode(self.ui, self.bzemail, charsets)
        msg = mail.mimeencode(self.ui, body, charsets)
        msg['From'] = user
        msg['To'] = bzemail
        msg['Subject'] = mail.headencode(self.ui, "Bug modification", charsets)
        sendmail = mail.connect(self.ui)
        sendmail(user, bzemail, msg.as_string())

    def updatebug(self, bugid, newstate, text, committer):
        cmds = []
        if 'hours' in newstate:
            cmds.append(self.makecommandline("work_time", newstate['hours']))
        if 'fix' in newstate:
            cmds.append(self.makecommandline("bug_status", self.fixstatus))
            cmds.append(self.makecommandline("resolution", self.fixresolution))
        self.send_bug_modify_email(bugid, cmds, text, committer)
774 775
class bugzilla(object):
    """Facade over the version-specific Bugzilla access drivers.

    Chooses a driver class from the [bugzilla] version config value,
    extracts bug references from changeset descriptions, and forwards
    updates/notifications to the driver.
    """
    # supported versions of bugzilla. different versions have
    # different schemas.
    _versions = {
        '2.16': bzmysql,
        '2.18': bzmysql_2_18,
        '3.0': bzmysql_3_0,
        'xmlrpc': bzxmlrpc,
        'xmlrpc+email': bzxmlrpcemail
        }

    # default regexp matching plain bug references, with named groups
    # 'ids' (the bug number list) and optional 'hours' (time worked)
    _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
                       r'(?P<ids>(?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
                       r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')

    # default regexp matching "fixes bug N" style references
    _default_fix_re = (r'fix(?:es)?\s*(?:bugs?\s*)?,?\s*'
                       r'(?:nos?\.?|num(?:ber)?s?)?\s*'
                       r'(?P<ids>(?:#?\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
                       r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')

    def __init__(self, ui, repo):
        self.ui = ui
        self.repo = repo

        # instantiate the access driver matching the configured version;
        # unknown versions abort early rather than failing mid-hook
        bzversion = self.ui.config('bugzilla', 'version')
        try:
            bzclass = bugzilla._versions[bzversion]
        except KeyError:
            raise error.Abort(_('bugzilla version %s not supported') %
                              bzversion)
        self.bzdriver = bzclass(self.ui)

        # user-configurable regexps override the defaults above
        self.bug_re = re.compile(
            self.ui.config('bugzilla', 'regexp',
                           bugzilla._default_bug_re), re.IGNORECASE)
        self.fix_re = re.compile(
            self.ui.config('bugzilla', 'fixregexp',
                           bugzilla._default_fix_re), re.IGNORECASE)
        # splits an 'ids' group into individual numeric ids
        self.split_re = re.compile(r'\D+')

    def find_bugs(self, ctx):
        '''return bugs dictionary created from commit comment.

        Extract bug info from changeset comments. Filter out any that are
        not known to Bugzilla, and any that already have a reference to
        the given changeset in their comments.
        '''
        start = 0
        hours = 0.0
        bugs = {}
        bugmatch = self.bug_re.search(ctx.description(), start)
        fixmatch = self.fix_re.search(ctx.description(), start)
        while True:
            bugattribs = {}
            if not bugmatch and not fixmatch:
                break
            # process whichever match occurs earliest in the description
            if not bugmatch:
                m = fixmatch
            elif not fixmatch:
                m = bugmatch
            else:
                if bugmatch.start() < fixmatch.start():
                    m = bugmatch
                else:
                    m = fixmatch
            start = m.end()
            if m is bugmatch:
                bugmatch = self.bug_re.search(ctx.description(), start)
                if 'fix' in bugattribs:
                    del bugattribs['fix']
            else:
                fixmatch = self.fix_re.search(ctx.description(), start)
                # presence of the 'fix' key (value unused) marks a fix
                bugattribs['fix'] = None

            try:
                ids = m.group('ids')
            except IndexError:
                # custom regexps may use a plain first group instead of 'ids'
                ids = m.group(1)
            try:
                hours = float(m.group('hours'))
                bugattribs['hours'] = hours
            except IndexError:
                # regexp has no 'hours' group
                pass
            except TypeError:
                # 'hours' group did not match (None)
                pass
            except ValueError:
                self.ui.status(_("%s: invalid hours\n") % m.group('hours'))

            for id in self.split_re.split(ids):
                if not id:
                    continue
                bugs[int(id)] = bugattribs
        if bugs:
            self.bzdriver.filter_real_bug_ids(bugs)
        if bugs:
            self.bzdriver.filter_cset_known_bug_ids(ctx.node(), bugs)
        return bugs

    def update(self, bugid, newstate, ctx):
        '''update bugzilla bug with reference to changeset.'''

        def webroot(root):
            '''strip leading prefix of repo root and turn into
            url-safe path.'''
            count = int(self.ui.config('bugzilla', 'strip', 0))
            root = util.pconvert(root)
            while count > 0:
                c = root.find('/')
                if c == -1:
                    break
                root = root[c + 1:]
                count -= 1
            return root

        # render the comment text with the configured template or style
        # file, falling back to a builtin template
        mapfile = None
        tmpl = self.ui.config('bugzilla', 'template')
        if not tmpl:
            mapfile = self.ui.config('bugzilla', 'style')
        if not mapfile and not tmpl:
            tmpl = _('changeset {node|short} in repo {root} refers '
                     'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
        t = cmdutil.changeset_templater(self.ui, self.repo,
                                        False, None, tmpl, mapfile, False)
        # capture template output via the ui buffer
        self.ui.pushbuffer()
        t.show(ctx, changes=ctx.changeset(),
               bug=str(bugid),
               hgweb=self.ui.config('web', 'baseurl'),
               root=self.repo.root,
               webroot=webroot(self.repo.root))
        data = self.ui.popbuffer()
        self.bzdriver.updatebug(bugid, newstate, data, util.email(ctx.user()))

    def notify(self, bugs, committer):
        '''ensure Bugzilla users are notified of bug change.'''
        self.bzdriver.notify(bugs, committer)
910 911
def hook(ui, repo, hooktype, node=None, **kwargs):
    '''add comment to bugzilla for each changeset that refers to a
    bugzilla bug id. only add a comment once per bug, so same change
    seen multiple times does not fill bug with duplicate data.'''
    # this hook only makes sense for hook types that supply a changeset
    # id (e.g. incoming/commit); anything else is a configuration error
    if node is None:
        raise error.Abort(_('hook type %s does not pass a changeset id') %
                          hooktype)
    try:
        bz = bugzilla(ui, repo)
        ctx = repo[node]
        bugs = bz.find_bugs(ctx)
        if bugs:
            for bug in bugs:
                bz.update(bug, bugs[bug], ctx)
            bz.notify(bugs, util.email(ctx.user()))
    # broad catch is deliberate: any driver/template failure is surfaced
    # as a single Abort so the hook fails cleanly
    except Exception as e:
        raise error.Abort(_('Bugzilla error: %s') % e)
@@ -1,131 +1,138 b''
1 1 # pycompat.py - portability shim for python 3
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 """Mercurial portability shim for python 3.
7 7
8 8 This contains aliases to hide python version-specific details from the core.
9 9 """
10 10
11 11 from __future__ import absolute_import
12 12
13 13 try:
14 14 import cPickle as pickle
15 15 pickle.dumps
16 16 except ImportError:
17 17 import pickle
18 18 pickle.dumps # silence pyflakes
19 19
20 20 try:
21 import urlparse
22 urlparse.urlparse
23 except ImportError:
24 import urllib.parse as urlparse
25 urlparse.urlparse
26
27 try:
21 28 import cStringIO as io
22 29 stringio = io.StringIO
23 30 except ImportError:
24 31 import io
25 32 stringio = io.StringIO
26 33
27 34 try:
28 35 import Queue as _queue
29 36 _queue.Queue
30 37 except ImportError:
31 38 import queue as _queue
32 39 empty = _queue.Empty
33 40 queue = _queue.Queue
34 41
class _pycompatstub(object):
    """Empty attribute container; populated by _alias() below."""
    pass
37 44
38 45 def _alias(alias, origin, items):
39 46 """ populate a _pycompatstub
40 47
41 48 copies items from origin to alias
42 49 """
43 50 def hgcase(item):
44 51 return item.replace('_', '').lower()
45 52 for item in items:
46 53 try:
47 54 setattr(alias, hgcase(item), getattr(origin, item))
48 55 except AttributeError:
49 56 pass
50 57
51 58 urlreq = _pycompatstub()
52 59 urlerr = _pycompatstub()
53 60 try:
54 61 import urllib2
55 62 import urllib
56 63 _alias(urlreq, urllib, (
57 64 "addclosehook",
58 65 "addinfourl",
59 66 "ftpwrapper",
60 67 "pathname2url",
61 68 "quote",
62 69 "splitattr",
63 70 "splitpasswd",
64 71 "splitport",
65 72 "splituser",
66 73 "unquote",
67 74 "url2pathname",
68 75 "urlencode",
69 76 "urlencode",
70 77 ))
71 78 _alias(urlreq, urllib2, (
72 79 "AbstractHTTPHandler",
73 80 "BaseHandler",
74 81 "build_opener",
75 82 "FileHandler",
76 83 "FTPHandler",
77 84 "HTTPBasicAuthHandler",
78 85 "HTTPDigestAuthHandler",
79 86 "HTTPHandler",
80 87 "HTTPPasswordMgrWithDefaultRealm",
81 88 "HTTPSHandler",
82 89 "install_opener",
83 90 "ProxyHandler",
84 91 "Request",
85 92 "urlopen",
86 93 ))
87 94 _alias(urlerr, urllib2, (
88 95 "HTTPError",
89 96 "URLError",
90 97 ))
91 98
92 99 except ImportError:
93 100 import urllib.request
94 101 _alias(urlreq, urllib.request, (
95 102 "AbstractHTTPHandler",
96 103 "addclosehook",
97 104 "addinfourl",
98 105 "BaseHandler",
99 106 "build_opener",
100 107 "FileHandler",
101 108 "FTPHandler",
102 109 "ftpwrapper",
103 110 "HTTPHandler",
104 111 "HTTPSHandler",
105 112 "install_opener",
106 113 "pathname2url",
107 114 "HTTPBasicAuthHandler",
108 115 "HTTPDigestAuthHandler",
109 116 "HTTPPasswordMgrWithDefaultRealm",
110 117 "ProxyHandler",
111 118 "quote",
112 119 "Request",
113 120 "splitattr",
114 121 "splitpasswd",
115 122 "splitport",
116 123 "splituser",
117 124 "unquote",
118 125 "url2pathname",
119 126 "urlopen",
120 127 ))
121 128 import urllib.error
122 129 _alias(urlerr, urllib.error, (
123 130 "HTTPError",
124 131 "URLError",
125 132 ))
126 133
127 134 try:
128 135 xrange
129 136 except NameError:
130 137 import builtins
131 138 builtins.xrange = range
@@ -1,2853 +1,2854 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import collections
21 21 import datetime
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import imp
26 26 import os
27 27 import re as remod
28 28 import shutil
29 29 import signal
30 30 import socket
31 31 import subprocess
32 32 import sys
33 33 import tempfile
34 34 import textwrap
35 35 import time
36 36 import traceback
37 37 import zlib
38 38
39 39 from . import (
40 40 encoding,
41 41 error,
42 42 i18n,
43 43 osutil,
44 44 parsers,
45 45 pycompat,
46 46 )
47 47
48 48 for attr in (
49 49 'empty',
50 50 'pickle',
51 51 'queue',
52 52 'urlerr',
53 'urlparse',
53 54 # we do import urlreq, but we do it outside the loop
54 55 #'urlreq',
55 56 'stringio',
56 57 ):
57 58 globals()[attr] = getattr(pycompat, attr)
58 59
59 60 # This line is to make pyflakes happy:
60 61 urlreq = pycompat.urlreq
61 62
62 63 if os.name == 'nt':
63 64 from . import windows as platform
64 65 else:
65 66 from . import posix as platform
66 67
67 68 _ = i18n._
68 69
69 70 cachestat = platform.cachestat
70 71 checkexec = platform.checkexec
71 72 checklink = platform.checklink
72 73 copymode = platform.copymode
73 74 executablepath = platform.executablepath
74 75 expandglobs = platform.expandglobs
75 76 explainexit = platform.explainexit
76 77 findexe = platform.findexe
77 78 gethgcmd = platform.gethgcmd
78 79 getuser = platform.getuser
79 80 getpid = os.getpid
80 81 groupmembers = platform.groupmembers
81 82 groupname = platform.groupname
82 83 hidewindow = platform.hidewindow
83 84 isexec = platform.isexec
84 85 isowner = platform.isowner
85 86 localpath = platform.localpath
86 87 lookupreg = platform.lookupreg
87 88 makedir = platform.makedir
88 89 nlinks = platform.nlinks
89 90 normpath = platform.normpath
90 91 normcase = platform.normcase
91 92 normcasespec = platform.normcasespec
92 93 normcasefallback = platform.normcasefallback
93 94 openhardlinks = platform.openhardlinks
94 95 oslink = platform.oslink
95 96 parsepatchoutput = platform.parsepatchoutput
96 97 pconvert = platform.pconvert
97 98 poll = platform.poll
98 99 popen = platform.popen
99 100 posixfile = platform.posixfile
100 101 quotecommand = platform.quotecommand
101 102 readpipe = platform.readpipe
102 103 rename = platform.rename
103 104 removedirs = platform.removedirs
104 105 samedevice = platform.samedevice
105 106 samefile = platform.samefile
106 107 samestat = platform.samestat
107 108 setbinary = platform.setbinary
108 109 setflags = platform.setflags
109 110 setsignalhandler = platform.setsignalhandler
110 111 shellquote = platform.shellquote
111 112 spawndetached = platform.spawndetached
112 113 split = platform.split
113 114 sshargs = platform.sshargs
114 115 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
115 116 statisexec = platform.statisexec
116 117 statislink = platform.statislink
117 118 termwidth = platform.termwidth
118 119 testpid = platform.testpid
119 120 umask = platform.umask
120 121 unlink = platform.unlink
121 122 unlinkpath = platform.unlinkpath
122 123 username = platform.username
123 124
124 125 # Python compatibility
125 126
126 127 _notset = object()
127 128
128 129 # disable Python's problematic floating point timestamps (issue4836)
129 130 # (Python hypocritically says you shouldn't change this behavior in
130 131 # libraries, and sure enough Mercurial is not a library.)
131 132 os.stat_float_times(False)
132 133
def safehasattr(thing, attr):
    """Return True if `thing` has an attribute named `attr`.

    Implemented with a sentinel default rather than the hasattr builtin.
    """
    value = getattr(thing, attr, _notset)
    return value is not _notset
135 136
136 137 DIGESTS = {
137 138 'md5': hashlib.md5,
138 139 'sha1': hashlib.sha1,
139 140 'sha512': hashlib.sha512,
140 141 }
141 142 # List of digest types from strongest to weakest
142 143 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
143 144
144 145 for k in DIGESTS_BY_STRENGTH:
145 146 assert k in DIGESTS
146 147
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        # digests: iterable of digest names (keys of module-level DIGESTS)
        # s: optional initial data to hash
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        # feed data into every tracked hash object
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # bugfix: the error message previously interpolated the stale
            # module-level loop variable 'k' instead of the requested 'key',
            # reporting the wrong digest name
            raise Abort(_('unknown digest type: %s') % key)
        # NOTE(review): a key in DIGESTS but not tracked by this instance
        # still raises KeyError here (pre-existing behavior)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
193 194
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        # check total byte count first, then every expected digest
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
225 226
226 227 try:
227 228 buffer = buffer
228 229 except NameError:
229 230 if sys.version_info[0] < 3:
230 231 def buffer(sliceable, offset=0):
231 232 return sliceable[offset:]
232 233 else:
233 234 def buffer(sliceable, offset=0):
234 235 return memoryview(sliceable)[offset:]
235 236
236 237 closefds = os.name == 'posix'
237 238
238 239 _chunksize = 4096
239 240
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        # input: unbuffered pipe-like object exposing fileno()/close()/closed;
        # raw data is pulled with os.read() in _fillbuffer()
        self._input = input
        self._buffer = []   # pending chunks, oldest first
        self._eof = False   # set once os.read() returns no data
        self._lenbuf = 0    # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # pull from the pipe until 'size' bytes are buffered or EOF is hit
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1   # index of the newline within the last chunk, -1 if none
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks so the slice below sees contiguous data
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
333 334
def popen2(cmd, env=None, newlines=False):
    """Run cmd through the shell; return its (stdin, stdout) pipes.

    Setting bufsize to -1 lets the system decide the buffer size.
    The default for bufsize is 0, meaning unbuffered. This leads to
    poor performance on Mac OS X: http://bugs.python.org/issue4194
    """
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
344 345
def popen3(cmd, env=None, newlines=False):
    """Like popen4 but return only the (stdin, stdout, stderr) pipes."""
    pipes = popen4(cmd, env, newlines)
    return pipes[0], pipes[1], pipes[2]
348 349
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run cmd through the shell; return (stdin, stdout, stderr, proc)."""
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
357 358
def version():
    """Return version information if available, 'unknown' otherwise."""
    try:
        from . import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
365 366
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4; missing numeric components become None and
    the 4th element is the '+'-suffix (or None when absent).

    >>> versiontuple('3.6.1+190-df9b73d2d444', 4)
    (3, 6, 1, '190-df9b73d2d444')
    >>> versiontuple('3.6', 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # split off everything after the first '+' as the extra component
    vparts, plus, extra = v.partition('+')
    if not plus:
        extra = None

    vints = []
    for part in vparts.split('.'):
        try:
            vints.append(int(part))
        except ValueError:
            break
    # pad to at least three numeric slots, e.g. (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
418 419
419 420 # used by parsedate
420 421 defaultdateformats = (
421 422 '%Y-%m-%d %H:%M:%S',
422 423 '%Y-%m-%d %I:%M:%S%p',
423 424 '%Y-%m-%d %H:%M',
424 425 '%Y-%m-%d %I:%M%p',
425 426 '%Y-%m-%d',
426 427 '%m-%d',
427 428 '%m/%d',
428 429 '%m/%d/%y',
429 430 '%m/%d/%Y',
430 431 '%a %b %d %H:%M:%S %Y',
431 432 '%a %b %d %I:%M:%S%p %Y',
432 433 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
433 434 '%b %d %H:%M:%S %Y',
434 435 '%b %d %I:%M:%S%p %Y',
435 436 '%b %d %H:%M:%S',
436 437 '%b %d %I:%M:%S%p',
437 438 '%b %d %H:%M',
438 439 '%b %d %I:%M%p',
439 440 '%b %d %Y',
440 441 '%b %d',
441 442 '%H:%M:%S',
442 443 '%I:%M:%S%p',
443 444 '%H:%M',
444 445 '%I:%M%p',
445 446 )
446 447
447 448 extendeddateformats = defaultdateformats + (
448 449 "%Y",
449 450 "%Y-%m",
450 451 "%b",
451 452 "%b %Y",
452 453 )
453 454
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # single-slot cache: func() is evaluated at most once
        results = []
        def f():
            if not results:
                results.append(func())
            return results[0]
        return f

    memo = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def f(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return f
479 480
class sortdict(dict):
    '''a simple sorted dictionary

    Keys iterate in insertion order; re-assigning an existing key moves
    it to the end of that order.
    '''
    def __init__(self, data=None):
        self._list = []   # keys in insertion order
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        # setting an existing key refreshes its position to the end
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # bugfix: dict.pop returns the removed value (or the supplied
        # default); the previous version discarded it and returned None,
        # breaking the standard dict contract for callers of pop()
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was supplied; nothing to unlist
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place key at an explicit position without the move-to-end logic
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
524 525
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        # neighbors in the circular list; wired up by lrucachedict
        self.next = None
        self.prev = None

        # _notset marks an empty (never used or recycled) slot
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
543 544
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # max: maximum number of entries held before eviction
        self._cache = {}

        # single-node circular list; grown lazily up to capacity
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Return the value for k, or default when k is not cached.

        bugfix: previously this returned the internal _lrucachenode (not
        its value) and did not record the access; delegating to
        __getitem__ returns the stored value and refreshes recency,
        matching the dict.get contract.
        """
        try:
            return self.__getitem__(k)
        except KeyError:
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
702 703
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()

    def lookup(key, compute):
        # on a hit, refresh recency; on a miss, evict the oldest entry
        # once more than 20 results are held, then compute and store
        if key in cache:
            order.remove(key)
        else:
            if len(cache) > 20:
                del cache[order.popleft()]
            cache[key] = compute()
        order.append(key)
        return cache[key]

    if func.__code__.co_argcount == 1:
        def f(arg):
            return lookup(arg, lambda: func(arg))
    else:
        def f(*args):
            return lookup(args, lambda: func(*args))

    return f
729 730
class propertycache(object):
    """Descriptor computing a value on first attribute access.

    The computed value is written into the instance __dict__ under the
    same name, so later accesses bypass the descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
742 743
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    # run CMD in a shell, feed S on stdin and capture stdout;
    # stderr is not redirected, so perr below is always None
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    return pout
749 750
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write S to a temp input file; create an empty temp output file
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS an odd exit status indicates success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of both temporary files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
783 784
784 785 filtertable = {
785 786 'tempfile:': tempfilter,
786 787 'pipe:': pipefilter,
787 788 }
788 789
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on command prefix ('tempfile:' or 'pipe:'); anything
    # without a known prefix falls back to a plain pipe filter
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)
795 796
def binary(s):
    """return true if a string is binary data"""
    # empty (or None) input is never considered binary; otherwise the
    # presence of a NUL byte is the heuristic
    if not s:
        return False
    return '\0' in s
799 800
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def _msb(value):
        # index of the highest set bit, i.e. floor(log2(value)); 0 for 0
        bits = -1
        while value:
            value >>= 1
            bits += 1
        return bits if bits >= 0 else 0

    pending = []
    pendingsize = 0
    for piece in source:
        pending.append(piece)
        pendingsize += len(piece)
        if pendingsize >= min:
            if min < max:
                # double the threshold, but jump straight to the size we
                # actually saw if it was bigger, capped at max
                min = min << 1
                grown = 1 << _msb(pendingsize)
                if grown > min:
                    min = grown
                if min > max:
                    min = max
            yield ''.join(pending)
            pending = []
            pendingsize = 0
    if pending:
        yield ''.join(pending)
830 831
# convenience alias; the canonical definition lives in the error module
Abort = error.Abort

def always(fn):
    """constant predicate: return True for any input"""
    return True

def never(fn):
    """constant predicate: return False for any input"""
    return False
838 839
def nogc(func):
    """decorator that disables the garbage collector for the call's duration

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        # remember whether GC was on so we only re-enable what we disabled
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if wasenabled:
                gc.enable()
    return wrapper
860 861
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # paths on different drives (Windows): no relative path is
            # possible, so fall back to an absolute one
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # drop the common leading components of both paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join((['..'] * len(a)) + b) or '.'
886 887
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):     # new py2exe
        return True
    if safehasattr(sys, "importers"):  # old py2exe
        return True
    return imp.is_frozen("__main__")   # tools/freeze
896 897
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)

# cached path of the 'hg' executable; filled in lazily by hgexecutable()
_hgexecutable = None
907 908
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.

    The result is computed once and cached in the module-level
    _hgexecutable; later calls return the cached value.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            # last resort: search $PATH, or fall back to the bare argv[0]
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable

def _sethgexecutable(path):
    """set (cache) the location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path
935 936
936 937 def _isstdout(f):
937 938 fileno = getattr(f, 'fileno', None)
938 939 return fileno and fileno() == sys.__stdout__.fileno()
939 940
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    # flush our stdout first so subprocess output isn't interleaved
    # before our own buffered output
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # merge caller-supplied overrides into a copy of our environment
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # caller wants the output: merge stderr into stdout and
            # forward it line by line to the given file-like object
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
998 999
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def checked(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            frames = traceback.extract_tb(sys.exc_info()[2])
            # a single traceback frame means func's body never ran: the
            # TypeError came from the call itself, i.e. a bad signature
            if len(frames) == 1:
                raise error.SignatureError
            raise

    return checked
1010 1011
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink rather than copying its target
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1055 1056
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, count) pair: whether hardlinking was still in
    effect at the end, and how many files were processed.
    """
    num = 0

    if hardlink is None:
        # default: hardlink only when src and dst live on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset child progress by the files already counted here
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed (e.g. cross-device); copy from here on
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1092 1093
# names and characters that can never appear in a Windows filename
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # only the part before the first dot counts for reserved names
        # (con.xml is reserved, xml.con is not)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # '.' and '..' are the only components allowed to end in a dot;
        # previously this was the substring test "n not in '..'", which
        # worked only because n is never empty here
        if t in '. ' and n not in ('.', '..'):
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1143 1144
# on Windows the generic check above applies; elsewhere defer to the
# platform module's implementation
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1148 1149
def makelock(info, pathname):
    """Create a lock file at *pathname* recording *info*.

    Prefers a symlink (atomic, and readable without opening); falls back
    to an exclusively-created regular file when symlinks are unavailable
    or fail for reasons other than the lock already existing.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)

def readlock(pathname):
    """Return the info recorded in the lock file at *pathname*.

    Mirrors makelock(): tries to read it as a symlink first, then falls
    back to reading the regular-file form.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
1174 1175
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    getfd = getattr(fp, 'fileno', None)
    if getfd is None:
        # no descriptor available; stat by name instead
        return os.stat(fp.name)
    return os.fstat(getfd())
1181 1182
1182 1183 # File system features
1183 1184
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True # no evidence against case sensitivity
    # probe the same name in the opposite case: on a case-insensitive
    # filesystem it resolves to the very same file
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
        if s2 == s1:
            return False
        return True
    except OSError:
        # the folded name doesn't exist at all: case-sensitive
        return True
1206 1207
1207 1208 try:
1208 1209 import re2
1209 1210 _re2 = None
1210 1211 except ImportError:
1211 1212 _re2 = False
1212 1213
class _re(object):
    """Facade over the re module that transparently uses re2 when usable."""

    def _checkre2(self):
        # probe re2 once and cache the verdict in the module-level _re2
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline rather than as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses features re2 doesn't support; fall through
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton; callers use util.re.compile() etc.
re = _re()
1257 1258
# per-directory cache of {normcased name: on-disk name}, shared across calls
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # The result must be assigned back: strings are immutable, so the
    # previous bare "seps.replace(...)" statement was a no-op and left
    # '\\' unescaped inside the character classes built below.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1300 1301
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # always remove both probe files, regardless of the outcome
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1332 1333
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # altsep may be None (no alternative separator on this platform)
    return os.altsep and path.endswith(os.altsep)
1336 1337
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed.'''
    return path.split(os.sep)
1344 1345
def gui():
    '''Are we running in a GUI?'''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        # elsewhere: Windows always has a GUI; X11 needs $DISPLAY
        return os.name == "nt" or os.environ.get("DISPLAY")
1359 1360
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    # create the temp file in the same directory so a later rename over
    # `name` stays on the same filesystem
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # source doesn't exist: an empty temp file is the copy
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1398 1399
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # missing file: record None instead of raising
            self.stat = None

    # keep identity-based hashing even though __eq__ is overridden below
    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparision of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat.st_ctime == old.stat.st_ctime and
                    self.stat.st_mtime == old.stat.st_mtime)
        except AttributeError:
            # either side has stat = None (missing file): never equal
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime  < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime  < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime  > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime  > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat.st_ctime == old.stat.st_ctime)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not self == other
1464 1465
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Commit: rename the temp copy over the permanent name."""
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        """Abort: delete the temp copy, leaving the original untouched."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, discard if an exception escaped the block
        if exctype is not None:
            self.discard()
        else:
            self.close()
1527 1528
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        # create missing ancestors first, then retry this directory
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1555 1556
def readfile(path):
    """return the entire contents of *path*, read in binary mode"""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1559 1560
def writefile(path, text):
    """replace the contents of *path* with *text* (binary mode)"""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1563 1564
def appendfile(path, text):
    """append *text* to *path*, creating the file if it does not exist"""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1567 1568
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # cap individual chunks at 256k so read() never has to slice
            # pathologically large strings
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # how far into the chunk at the head of the queue we have consumed
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1648 1649
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # 'nbytes and ...' avoids a zero-length read; a falsy result
        # (0 or empty string) means we are done either way
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1669 1670
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # the local UTC offset is the gap between the naive UTC and naive
    # local renderings of the same instant
    utcnaive = datetime.datetime.utcfromtimestamp(timestamp)
    localnaive = datetime.datetime.fromtimestamp(timestamp)
    offset = utcnaive - localnaive
    tz = offset.days * 86400 + offset.seconds
    return timestamp, tz
1682 1683
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand the %z/%1/%2 placeholders into a [+-]HHMM zone suffix;
        # positive offsets are west of UTC, hence the '-' sign for them
        sign = "-" if tz > 0 else "+"
        q, r = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    # clamp the shifted timestamp to the signed 32-bit range
    d = t - tz
    d = min(max(d, -0x80000000), 0x7fffffff)
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    stamp = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return stamp.strftime(format)
1718 1719
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into an ISO 8601 date string."""
    isofmt = '%Y-%m-%d'
    return datestr(date, format=isofmt)
1722 1723
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Accepts numeric offsets like '+0200'/'-0430' and the names 'GMT'
    and 'UTC'.  Returns the offset in seconds (negative means east of
    UTC, matching the makedate() convention), or None when the string
    is not a recognized timezone.
    """
    # guard the empty string, which would otherwise raise IndexError
    # on the tz[0] subscript below
    if not tz:
        return None
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        # stored offsets are west-positive, hence the negation
        return -sign * (hours * 60 + minutes) * 60
    if tz == "GMT" or tz == "UTC":
        return 0
    return None
1733 1734
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    defaults is expected to be a mapping of date-part key ("S", "M",
    "HI", "d", "mb", "yY") to a (biased, now) pair of strings, as built
    by parsedate(); the [] default is a placeholder only -- indexing a
    list with a string key would raise TypeError, so callers always
    supply the mapping.
    """
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        # the last token was a recognized timezone; parse the rest alone
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # append the default via a '@' separator that cannot clash
            # with the user's own date text
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1763 1764
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: a plain "unixtime offset" integer pair
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses; the for/else fires
        # only when every format failed
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1840 1841
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round every unspecified field down (Jan 1st, 00:00:00)
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round every unspecified field up (Dec 31st, 23:59:59); try the
        # longer month lengths first and fall back for shorter months
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': anything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive 'DATE to DATE' range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a bare date matches anywhere inside its span of accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1916 1917
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        regexpattern = pattern[3:]
        try:
            matcher = remod.compile(regexpattern).search
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', regexpattern, matcher
    # strip an explicit 'literal:' prefix; anything else falls through
    # and is matched literally as-is
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__
1955 1956
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the mail domain, then anything before '<', then stop at the
    # first space or dot -- "Joe Bloggs <joe.bloggs@x>" becomes "joe"
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    for sep in (' ', '.'):
        pos = user.find(sep)
        if pos >= 0:
            user = user[:pos]
    return user
1971 1972
def emailuser(user):
    """Return the user portion of an email address."""
    # truncate at the mail domain, then drop any "Name <" prefix
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    return user
1981 1982
def email(author):
    '''get email of author.'''
    # take what lies between '<' and '>'; with neither present this
    # returns the whole string (find('<') + 1 == 0, end is None)
    end = author.find('>')
    if end == -1:
        end = None
    start = author.find('<') + 1
    return author[start:end]
1988 1989
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # delegate to encoding.trim so width is measured in display columns
    # (multi-byte/wide characters), not in bytes
    return encoding.trim(text, maxlength, ellipsis='...')
1992 1993
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity

    unittable is a sequence of (multiplier, divisor, format) rules,
    ordered from largest unit to smallest; the first rule whose
    threshold (divisor * multiplier) the count reaches wins.'''

    def render(count):
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # nothing matched: fall back to the smallest unit
        return unittable[-1][2] % count

    return render
2003 2004
# render a byte count as a human-readable string (e.g. "1.23 MB"); the
# table is ordered largest-to-smallest unit and the first matching rule
# wins (see unitcountfn above)
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2016 2017
def uirepr(s):
    # Avoid double backslash in Windows path repr()
    r = repr(s)
    return r.replace('\\\\', '\\')
2020 2021
# delay import of textwrap
def MBTextWrapper(**kwargs):
    # This is a self-replacing factory: the first call defines the class
    # and rebinds the module-level name MBTextWrapper to it (see the
    # 'global' statement at the bottom), so later calls construct the
    # class directly.
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr so the first piece fits in space_left display
            # columns, using encoding.ucolwidth() for per-char width
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the class so the definition cost is paid only once
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2124 2125
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap a local-encoding byte string to *width* display columns.

    initindent prefixes the first line, hangindent every subsequent
    line; the result is re-encoded to the local encoding.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    uline = line.decode(encoding.encoding, encoding.encodingmode)
    uinit = initindent.decode(encoding.encoding, encoding.encodingmode)
    uhang = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=uinit,
                            subsequent_indent=uhang)
    return wrapper.fill(uline).encode(encoding.encoding)
2137 2138
def iterlines(iterator):
    """Yield each text line from an iterable of string chunks."""
    for chunk in iterator:
        lines = chunk.splitlines()
        for line in lines:
            yield line
2142 2143
def expandpath(path):
    """Expand environment variables, then a leading ~, in *path*."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
2145 2146
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2160 2161
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # os.wait() returns a (pid, status) tuple, so 'terminated'
        # holds tuples, not bare pids
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): 'pid in terminated' compares an int against
            # the (pid, status) tuples stored above, so it can never be
            # True; child-exit detection effectively relies on
            # testpid(pid) alone -- confirm whether this is intended.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD handler
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2195 2196
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda text: text)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        # a doubled prefix escapes itself: map the bare prefix character
        # to itself so '%%' (say) renders as '%'
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))
    return matcher.sub(lambda m: fn(mapping[m.group()[1:]]), s)
2220 2221
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: treat it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2237 2238
2238 2239 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
2239 2240 '0': False, 'no': False, 'false': False, 'off': False,
2240 2241 'never': False}
2241 2242
2242 2243 def parsebool(s):
2243 2244 """Parse s into a boolean.
2244 2245
2245 2246 If s is not a valid boolean, returns None.
2246 2247 """
2247 2248 return _booleans.get(s.lower(), None)
2248 2249
2249 2250 _hexdig = '0123456789ABCDEFabcdef'
2250 2251 _hextochr = dict((a + b, chr(int(a + b, 16)))
2251 2252 for a in _hexdig for b in _hexdig)
2252 2253
2253 2254 def _urlunquote(s):
2254 2255 """Decode HTTP/HTML % encoding.
2255 2256
2256 2257 >>> _urlunquote('abc%20def')
2257 2258 'abc def'
2258 2259 """
2259 2260 res = s.split('%')
2260 2261 # fastpath
2261 2262 if len(res) == 1:
2262 2263 return s
2263 2264 s = res[0]
2264 2265 for item in res[1:]:
2265 2266 try:
2266 2267 s += _hextochr[item[:2]] + item[2:]
2267 2268 except KeyError:
2268 2269 s += '%' + item
2269 2270 except UnicodeDecodeError:
2270 2271 s += unichr(int(item[:2], 16)) + item[2:]
2271 2272 return s
2272 2273
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        # keep the untouched input for localpath() on non-local URLs
        self._origpath = path

        # split off the fragment first, so '#' in later components is
        # never misinterpreted
        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
            if self.port:
                s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        """Return (url-without-credentials, auth-token-or-None) for this
        URL; the token has the shape expected by the urllib2 password
        manager."""
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        """Return True if this URL cannot be joined onto a base path."""
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        """Return a local filesystem path for file:/bundle: URLs; any
        other URL is returned as the original input string."""
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2559 2560
def hasscheme(path):
    # True when path parses as a URL with an explicit scheme (e.g. 'http://')
    return bool(url(path).scheme)
2562 2563
def hasdriveletter(path):
    """True if path starts with a Windows drive letter (e.g. 'c:')."""
    if not path:
        # preserve the falsy input itself ('' or None); callers only
        # test truthiness, matching the original short-circuit result
        return path
    return path[0:1].isalpha() and path[1:2] == ':'
2565 2566
def urllocalpath(path):
    # parse without query/fragment splitting so '?' and '#' survive as
    # literal path characters, then map to a local filesystem path
    return url(path, parsequery=False, parsefragment=False).localpath()
2568 2569
def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        # mask the value but keep evidence that a password was present
        u.passwd = '***'
    return str(u)
2575 2576
def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    # unlike hidepassword, drop both the user and the password entirely
    u.user = u.passwd = None
    return str(u)
2581 2582
def isatty(fp):
    """Return fp.isatty(), treating a missing isatty() as non-interactive."""
    try:
        interactive = fp.isatty()
    except AttributeError:
        # duck-typed file objects without isatty() are never terminals
        return False
    return interactive
2587 2588
# Human-readable duration formatter built from (factor, scale, format)
# triples; the exact selection rule is defined by unitcountfn (declared
# earlier in this file, not visible here) -- presumably the first entry
# whose scaled value reaches the factor wins. TODO confirm against
# unitcountfn's docstring.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2603 2604
# current indentation level for nested @timed reports; a one-element
# list so the closures below can mutate it in place
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        # deepen the indent so inner @timed calls report nested
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            # report even when func raises
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
2630 2631
# recognized size suffixes; longer suffixes ('kb') must be tried before
# their one-letter forms would shadow them via endswith
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if not spec.endswith(suffix):
                continue
            number = spec[:-len(suffix)]
            return int(float(number) * multiplier)
        # no recognized unit suffix: a plain byte count
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2652 2653
class hooks(object):
    '''An ordered collection of hook callables extending one function.

    Hooks run sorted lexicographically by the name of the source that
    registered them, and calling the collection returns every hook's
    result in that order.'''

    def __init__(self):
        # list of (source, callable) pairs; sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda entry: entry[0])
        return [hookfn(*args) for _src, hookfn in self._hooks]
2670 2671
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    # drop this frame plus the 'skip' innermost caller frames
    entries = [(fileline % (fn, ln), func)
        for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        # width of the widest file:line string, used to align the
        # 'in <function>' column
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)
2692 2693
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so interleaved output stays ordered
        otherf.flush()
    f.write('%s at:\n' % msg)
    # skip + 1 also hides this very function from the reported stack
    for line in getstackframes(skip + 1):
        f.write(line)
    f.flush()
2705 2706
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # 'map' is either a dirstate-like mapping (file -> state tuple,
        # detected via iteritems) or a plain iterable of file names.
        # With a dirstate, entries whose state equals 'skip' are ignored.
        self._dirs = {}
        addpath = self.addpath  # local alias; avoids attr lookup per file
        if safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        # Increment the refcount of each ancestor directory of path.
        # Stop at the first directory already present: its own ancestors
        # were counted when it was first added.
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        # Decrement refcounts of path's ancestors; stop at the first
        # directory still referenced by another path.
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2741 2742
# prefer the C implementation of dirs when the parsers extension
# module provides one
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2744 2745
def finddirs(path):
    """Yield each ancestor directory of path, deepest first.

    A rooted path such as '/a/b' ultimately yields '' for the root;
    a bare filename yields nothing.
    """
    end = len(path)
    while True:
        end = path.rfind('/', 0, end)
        if end == -1:
            return
        yield path[:end]
2750 2751
2751 2752 # compression utility
2752 2753
class nocompress(object):
    # identity "compressor" used for uncompressed ('UN'/None) streams;
    # mirrors the compressobj interface (compress + flush)
    def compress(self, x):
        return x
    def flush(self):
        return ""
2758 2759
# stream-compressor factories keyed by compression type header
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
}
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2767 2768
def _makedecompressor(decompcls):
    # Build a 'func(fh) -> file-like' streaming decompressor from a
    # factory returning a decompressor object (with a decompress method).
    def generator(f):
        d = decompcls()
        # decompress the input chunk by chunk rather than slurping it
        for chunk in filechunkiter(f):
            yield d.decompress(chunk)
    def func(fh):
        return chunkbuffer(generator(fh))
    return func
2776 2777
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.

        Returns the list of values the managers' __enter__ produced.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            # record the exit hook so __exit__ can unwind in reverse
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # this manager handled the exception; later
                    # managers see a clean exit
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # a cleanup function itself raised: remember the new
                # exception and pass it to the remaining exit hooks.
                # (Single sys.exc_info() call; the previous code called
                # it twice in a row, the first result being discarded.)
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2836 2837
def _bz2():
    # factory for the '_truncatedBZ' decompressor: a bz2 stream whose
    # leading magic was consumed by the type-header parsing
    d = bz2.BZ2Decompressor()
    # Bzip2 stream start with BZ, but we stripped it.
    # we put it back for good measure.
    d.decompress('BZ')
    return d
2843 2844
# stream-decompressor factories keyed by compression type header;
# values take a file-like object and return a decompressing reader
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2851 2852
# convenient shortcut for interactive debugging sessions
dst = debugstacktrace
@@ -1,151 +1,151 b''
1 1 #require test-repo
2 2
3 3 $ . "$TESTDIR/helpers-testrepo.sh"
4 4 $ cd "$TESTDIR"/..
5 5
6 6 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
7 7 hgext/fsmonitor/pywatchman/__init__.py not using absolute_import
8 8 hgext/fsmonitor/pywatchman/__init__.py requires print_function
9 9 hgext/fsmonitor/pywatchman/capabilities.py not using absolute_import
10 10 hgext/fsmonitor/pywatchman/pybser.py not using absolute_import
11 11 hgext/highlight/__init__.py not using absolute_import
12 12 hgext/highlight/highlight.py not using absolute_import
13 13 hgext/share.py not using absolute_import
14 14 hgext/win32text.py not using absolute_import
15 15 i18n/check-translation.py not using absolute_import
16 16 i18n/polib.py not using absolute_import
17 17 setup.py not using absolute_import
18 18 tests/heredoctest.py requires print_function
19 19 tests/md5sum.py not using absolute_import
20 20 tests/readlink.py not using absolute_import
21 21 tests/run-tests.py not using absolute_import
22 22 tests/test-demandimport.py not using absolute_import
23 23
24 24 #if py3exe
25 25 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs $PYTHON3 contrib/check-py3-compat.py
26 26 doc/hgmanpage.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
27 27 hgext/automv.py: error importing module: <SyntaxError> invalid syntax (commands.py, line *) (line *) (glob)
28 28 hgext/blackbox.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
29 hgext/bugzilla.py: error importing module: <ImportError> No module named 'urlparse' (line *) (glob)
29 hgext/bugzilla.py: error importing module: <ImportError> No module named 'xmlrpclib' (line *) (glob)
30 30 hgext/censor.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
31 31 hgext/chgserver.py: error importing module: <ImportError> No module named 'SocketServer' (line *) (glob)
32 32 hgext/children.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
33 33 hgext/churn.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
34 34 hgext/clonebundles.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
35 35 hgext/color.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
36 36 hgext/convert/bzr.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
37 37 hgext/convert/convcmd.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
38 38 hgext/convert/cvs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
39 39 hgext/convert/cvsps.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
40 40 hgext/convert/darcs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
41 41 hgext/convert/filemap.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
42 42 hgext/convert/git.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
43 43 hgext/convert/gnuarch.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
44 44 hgext/convert/hg.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
45 45 hgext/convert/monotone.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
46 46 hgext/convert/p*.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *) (glob)
47 47 hgext/convert/subversion.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
48 48 hgext/convert/transport.py: error importing module: <ImportError> No module named 'svn.client' (line *) (glob)
49 49 hgext/eol.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
50 50 hgext/extdiff.py: error importing module: <SyntaxError> invalid syntax (archival.py, line *) (line *) (glob)
51 51 hgext/factotum.py: error importing: <ImportError> No module named 'rfc822' (error at __init__.py:*) (glob)
52 52 hgext/fetch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
53 53 hgext/fsmonitor/watchmanclient.py: error importing module: <SystemError> Parent module 'hgext.fsmonitor' not loaded, cannot perform relative import (line *) (glob)
54 54 hgext/gpg.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
55 55 hgext/graphlog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
56 56 hgext/hgk.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
57 57 hgext/histedit.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
58 58 hgext/keyword.py: error importing: <ImportError> No module named 'BaseHTTPServer' (error at common.py:*) (glob)
59 59 hgext/largefiles/basestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
60 60 hgext/largefiles/lfcommands.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
61 61 hgext/largefiles/lfutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
62 62 hgext/largefiles/localstore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
63 63 hgext/largefiles/overrides.py: error importing module: <SyntaxError> invalid syntax (archival.py, line *) (line *) (glob)
64 64 hgext/largefiles/proto.py: error importing: <ImportError> No module named 'httplib' (error at httppeer.py:*) (glob)
65 65 hgext/largefiles/remotestore.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at wireproto.py:*) (glob)
66 66 hgext/largefiles/reposetup.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
67 67 hgext/largefiles/storefactory.py: error importing: <SyntaxError> invalid syntax (bundle2.py, line *) (error at bundlerepo.py:*) (glob)
68 68 hgext/largefiles/uisetup.py: error importing: <ImportError> No module named 'BaseHTTPServer' (error at common.py:*) (glob)
69 69 hgext/largefiles/wirestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *) (glob)
70 70 hgext/mq.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
71 71 hgext/notify.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
72 72 hgext/pager.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
73 73 hgext/patchbomb.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
74 74 hgext/purge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
75 75 hgext/rebase.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
76 76 hgext/record.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
77 77 hgext/relink.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
78 78 hgext/schemes.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
79 79 hgext/share.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
80 80 hgext/shelve.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
81 81 hgext/strip.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
82 82 hgext/transplant.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
83 83 mercurial/archival.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
84 84 mercurial/branchmap.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
85 85 mercurial/bundle*.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
86 86 mercurial/bundlerepo.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
87 87 mercurial/changegroup.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
88 88 mercurial/changelog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
89 89 mercurial/cmdutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
90 90 mercurial/commands.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
91 91 mercurial/commandserver.py: error importing module: <ImportError> No module named 'SocketServer' (line *) (glob)
92 92 mercurial/context.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
93 93 mercurial/copies.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
94 94 mercurial/crecord.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
95 95 mercurial/dirstate.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
96 96 mercurial/discovery.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
97 97 mercurial/dispatch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
98 98 mercurial/exchange.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
99 99 mercurial/extensions.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
100 100 mercurial/filelog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
101 101 mercurial/filemerge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
102 102 mercurial/fileset.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
103 103 mercurial/formatter.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
104 104 mercurial/graphmod.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
105 105 mercurial/help.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
106 106 mercurial/hg.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
107 107 mercurial/hgweb/common.py: error importing module: <ImportError> No module named 'BaseHTTPServer' (line *) (glob)
108 108 mercurial/hgweb/hgweb_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
109 109 mercurial/hgweb/hgwebdir_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
110 110 mercurial/hgweb/protocol.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
111 111 mercurial/hgweb/request.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
112 112 mercurial/hgweb/server.py: error importing module: <ImportError> No module named 'BaseHTTPServer' (line *) (glob)
113 113 mercurial/hgweb/webcommands.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
114 114 mercurial/hgweb/webutil.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
115 115 mercurial/hgweb/wsgicgi.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *) (glob)
116 116 mercurial/hook.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
117 117 mercurial/httpconnection.py: error importing: <ImportError> No module named 'rfc822' (error at __init__.py:*) (glob)
118 118 mercurial/httppeer.py: error importing module: <ImportError> No module named 'httplib' (line *) (glob)
119 119 mercurial/keepalive.py: error importing module: <ImportError> No module named 'httplib' (line *) (glob)
120 120 mercurial/localrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
121 121 mercurial/mail.py: error importing module: <AttributeError> module 'email' has no attribute 'Header' (line *) (glob)
122 122 mercurial/manifest.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
123 123 mercurial/merge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
124 124 mercurial/namespaces.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
125 125 mercurial/patch.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
126 126 mercurial/pure/mpatch.py: error importing module: <ImportError> cannot import name 'pycompat' (line *) (glob)
127 127 mercurial/pure/parsers.py: error importing module: <ImportError> No module named 'mercurial.pure.node' (line *) (glob)
128 128 mercurial/repair.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
129 129 mercurial/revlog.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
130 130 mercurial/revset.py: error importing module: <AttributeError> 'dict' object has no attribute 'iteritems' (line *) (glob)
131 131 mercurial/scmutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
132 132 mercurial/scmwindows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
133 133 mercurial/simplemerge.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
134 134 mercurial/sshpeer.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at wireproto.py:*) (glob)
135 135 mercurial/sshserver.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
136 136 mercurial/statichttprepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
137 137 mercurial/store.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
138 138 mercurial/streamclone.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
139 139 mercurial/subrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
140 140 mercurial/templatefilters.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
141 141 mercurial/templatekw.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
142 142 mercurial/templater.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
143 143 mercurial/ui.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
144 144 mercurial/unionrepo.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
145 145 mercurial/url.py: error importing module: <ImportError> No module named 'httplib' (line *) (glob)
146 146 mercurial/verify.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
147 147 mercurial/win*.py: error importing module: <ImportError> No module named 'msvcrt' (line *) (glob)
148 148 mercurial/windows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
149 149 mercurial/wireproto.py: error importing module: <SyntaxError> invalid syntax (bundle*.py, line *) (line *) (glob)
150 150
151 151 #endif
@@ -1,158 +1,161 b''
1 1 #!/usr/bin/env python
2 2
3 3 from __future__ import absolute_import, print_function
4 4
5 5 __doc__ = """Tiny HTTP Proxy.
6 6
7 7 This module implements GET, HEAD, POST, PUT and DELETE methods
8 8 on BaseHTTPServer, and behaves as an HTTP proxy. The CONNECT
9 9 method is also implemented experimentally, but has not been
10 10 tested yet.
11 11
12 12 Any help will be greatly appreciated. SUZUKI Hisao
13 13 """
14 14
15 15 __version__ = "0.2.1"
16 16
17 17 import BaseHTTPServer
18 18 import SocketServer
19 19 import os
20 20 import select
21 21 import socket
22 22 import sys
23 import urlparse
23
24 from mercurial import util
25
26 urlparse = util.urlparse
24 27
class ProxyHandler (BaseHTTPServer.BaseHTTPRequestHandler):
    """Request handler implementing a minimal forwarding HTTP proxy.

    GET/HEAD/POST/PUT/DELETE are proxied by opening a fresh TCP
    connection to the request's target host and relaying the request
    and response; CONNECT tunnels raw bytes in both directions.

    If the class attribute ``allowed_clients`` is set (a sequence of
    client IP addresses), requests from any other address are rejected
    with a 403.
    """

    __base = BaseHTTPServer.BaseHTTPRequestHandler
    # Keep a reference to the base handle() so our override can
    # delegate to it for permitted clients.
    __base_handle = __base.handle

    server_version = "TinyHTTPProxy/" + __version__
    rbufsize = 0                        # unbuffered self.rfile, so request data is read immediately

    def handle(self):
        """Dispatch one connection, enforcing the optional client allowlist.

        Clients not listed in ``allowed_clients`` get a 403; the request
        line is still read and parsed first so the error response is
        well-formed.
        """
        (ip, port) = self.client_address
        allowed = getattr(self, 'allowed_clients', None)
        if allowed is not None and ip not in allowed:
            self.raw_requestline = self.rfile.readline()
            if self.parse_request():
                self.send_error(403)
        else:
            self.__base_handle()

    def log_request(self, code='-', size='-'):
        """Log the request line plus any 'x-' request headers (sorted)."""
        xheaders = [h for h in self.headers.items() if h[0].startswith('x-')]
        self.log_message('"%s" %s %s%s',
                         self.requestline, str(code), str(size),
                         ''.join([' %s:%s' % h for h in sorted(xheaders)]))

    def _connect_to(self, netloc, soc):
        """Connect *soc* to ``host[:port]`` in *netloc* (default port 80).

        Returns 1 on success; on failure sends a 404 to the client and
        returns 0.
        """
        i = netloc.find(':')
        if i >= 0:
            host_port = netloc[:i], int(netloc[i + 1:])
        else:
            host_port = netloc, 80
        print("\t" "connect to %s:%d" % host_port)
        try: soc.connect(host_port)
        except socket.error as arg:
            # Old-style exceptions were indexable (errno, message);
            # fall back to the exception object itself otherwise.
            try: msg = arg[1]
            except (IndexError, TypeError): msg = arg
            self.send_error(404, msg)
            return 0
        return 1

    def do_CONNECT(self):
        """Handle CONNECT: acknowledge, then tunnel raw bytes both ways."""
        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            if self._connect_to(self.path, soc):
                self.log_request(200)
                self.wfile.write(self.protocol_version +
                                 " 200 Connection established\r\n")
                self.wfile.write("Proxy-agent: %s\r\n" % self.version_string())
                self.wfile.write("\r\n")
                # Longer idle budget (300 * 3s) than plain requests,
                # since tunnels may legitimately sit quiet.
                self._read_write(soc, 300)
        finally:
            print("\t" "bye")
            soc.close()
            self.connection.close()

    def do_GET(self):
        """Proxy a plain HTTP request to its origin server.

        Only absolute http:// URLs are accepted; anything else (other
        scheme, fragment, missing host) is rejected with a 400.
        """
        (scm, netloc, path, params, query, fragment) = urlparse.urlparse(
            self.path, 'http')
        if scm != 'http' or fragment or not netloc:
            self.send_error(400, "bad url %s" % self.path)
            return
        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            if self._connect_to(netloc, soc):
                self.log_request()
                # Re-issue the request with a server-relative URL.
                soc.send("%s %s %s\r\n" % (
                    self.command,
                    urlparse.urlunparse(('', '', path, params, query, '')),
                    self.request_version))
                # Force the upstream connection closed after one
                # response, and strip the hop-by-hop proxy header.
                self.headers['Connection'] = 'close'
                del self.headers['Proxy-Connection']
                for key_val in self.headers.items():
                    soc.send("%s: %s\r\n" % key_val)
                soc.send("\r\n")
                self._read_write(soc)
        finally:
            print("\t" "bye")
            soc.close()
            self.connection.close()

    def _read_write(self, soc, max_idling=20):
        """Shuttle data between the client connection and *soc*.

        Polls both sockets with select() on a 3 second timeout and
        copies whatever arrives to the opposite side.  Stops on a
        socket exception, on EOF from either side, or after
        *max_idling* consecutive idle periods.
        """
        iw = [self.connection, soc]
        ow = []
        count = 0
        while True:
            count += 1
            (ins, _, exs) = select.select(iw, ow, iw, 3)
            if exs:
                break
            if ins:
                for i in ins:
                    if i is soc:
                        out = self.connection
                    else:
                        out = soc
                    try:
                        data = i.recv(8192)
                    except socket.error:
                        break
                    if data:
                        out.send(data)
                        count = 0
            else:
                print("\t" "idle", count)
            if count == max_idling:
                break

    # All plain methods are proxied identically.
    do_HEAD = do_GET
    do_POST = do_GET
    do_PUT = do_GET
    do_DELETE = do_GET
134 137
class ThreadingHTTPServer (SocketServer.ThreadingMixIn,
                           BaseHTTPServer.HTTPServer):
    """HTTP server that handles each proxied request in its own thread.

    On startup it records the server's process id in ``proxy.pid`` in
    the current directory — presumably so the test harness can find and
    kill the proxy later.
    """

    def __init__(self, *args, **kwargs):
        BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
        # Use a context manager so the pid file is flushed and closed
        # even if the write raises (the original left the handle open
        # on error).
        with open("proxy.pid", "w") as pidfile:
            pidfile.write(str(os.getpid()) + "\n")
142 145
if __name__ == '__main__':
    # Usage: tinyproxy.py [port [allowed_client_name ...]]
    args = sys.argv
    if args[1:] and args[1] in ('-h', '--help'):
        print(args[0], "[port [allowed_client_name ...]]")
    else:
        extra = args[2:]
        if extra:
            # Resolve each allowed client name to an IP address and
            # install the allowlist on the handler class.
            resolved = []
            for hostname in extra:
                addr = socket.gethostbyname(hostname)
                resolved.append(addr)
                print("Accept: %s (%s)" % (addr, hostname))
            ProxyHandler.allowed_clients = resolved
            # Trim the extra arguments; args aliases sys.argv, which
            # BaseHTTPServer.test() inspects for the port.
            del args[2:]
        else:
            print("Any clients will be served...")
        BaseHTTPServer.test(ProxyHandler, ThreadingHTTPServer)
General Comments 0
You need to be logged in to leave comments. Login now