##// END OF EJS Templates
vfs: add "notindexed" argument to invoke "ensuredir" with it in write mode...
FUJIWARA Katsunori -
r23370:46265d0f default
parent child Browse files
Show More
@@ -1,1046 +1,1060
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob, tempfile
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
23 23 class status(tuple):
24 24 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
25 25 and 'ignored' properties are only relevant to the working copy.
26 26 '''
27 27
28 28 __slots__ = ()
29 29
30 30 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
31 31 clean):
32 32 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
33 33 ignored, clean))
34 34
35 35 @property
36 36 def modified(self):
37 37 '''files that have been modified'''
38 38 return self[0]
39 39
40 40 @property
41 41 def added(self):
42 42 '''files that have been added'''
43 43 return self[1]
44 44
45 45 @property
46 46 def removed(self):
47 47 '''files that have been removed'''
48 48 return self[2]
49 49
50 50 @property
51 51 def deleted(self):
52 52 '''files that are in the dirstate, but have been deleted from the
53 53 working copy (aka "missing")
54 54 '''
55 55 return self[3]
56 56
57 57 @property
58 58 def unknown(self):
59 59 '''files not in the dirstate that are not ignored'''
60 60 return self[4]
61 61
62 62 @property
63 63 def ignored(self):
64 64 '''files not in the dirstate that are ignored (by _dirignore())'''
65 65 return self[5]
66 66
67 67 @property
68 68 def clean(self):
69 69 '''files that have not been modified'''
70 70 return self[6]
71 71
72 72 def __repr__(self, *args, **kwargs):
73 73 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
74 74 'unknown=%r, ignored=%r, clean=%r>') % self)
75 75
76 76 def itersubrepos(ctx1, ctx2):
77 77 """find subrepos in ctx1 or ctx2"""
78 78 # Create a (subpath, ctx) mapping where we prefer subpaths from
79 79 # ctx1. The subpaths from ctx2 are important when the .hgsub file
80 80 # has been modified (in ctx2) but not yet committed (in ctx1).
81 81 subpaths = dict.fromkeys(ctx2.substate, ctx2)
82 82 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
83 83 for subpath, ctx in sorted(subpaths.iteritems()):
84 84 yield subpath, ctx.sub(subpath)
85 85
86 86 def nochangesfound(ui, repo, excluded=None):
87 87 '''Report no changes for push/pull, excluded is None or a list of
88 88 nodes excluded from the push/pull.
89 89 '''
90 90 secretlist = []
91 91 if excluded:
92 92 for n in excluded:
93 93 if n not in repo:
94 94 # discovery should not have included the filtered revision,
95 95 # we have to explicitly exclude it until discovery is cleanup.
96 96 continue
97 97 ctx = repo[n]
98 98 if ctx.phase() >= phases.secret and not ctx.extinct():
99 99 secretlist.append(n)
100 100
101 101 if secretlist:
102 102 ui.status(_("no changes found (ignored %d secret changesets)\n")
103 103 % len(secretlist))
104 104 else:
105 105 ui.status(_("no changes found\n"))
106 106
107 107 def checknewlabel(repo, lbl, kind):
108 108 # Do not use the "kind" parameter in ui output.
109 109 # It makes strings difficult to translate.
110 110 if lbl in ['tip', '.', 'null']:
111 111 raise util.Abort(_("the name '%s' is reserved") % lbl)
112 112 for c in (':', '\0', '\n', '\r'):
113 113 if c in lbl:
114 114 raise util.Abort(_("%r cannot be used in a name") % c)
115 115 try:
116 116 int(lbl)
117 117 raise util.Abort(_("cannot use an integer as a name"))
118 118 except ValueError:
119 119 pass
120 120
121 121 def checkfilename(f):
122 122 '''Check that the filename f is an acceptable filename for a tracked file'''
123 123 if '\r' in f or '\n' in f:
124 124 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
125 125
126 126 def checkportable(ui, f):
127 127 '''Check if filename f is portable and warn or abort depending on config'''
128 128 checkfilename(f)
129 129 abort, warn = checkportabilityalert(ui)
130 130 if abort or warn:
131 131 msg = util.checkwinfilename(f)
132 132 if msg:
133 133 msg = "%s: %r" % (msg, f)
134 134 if abort:
135 135 raise util.Abort(msg)
136 136 ui.warn(_("warning: %s\n") % msg)
137 137
138 138 def checkportabilityalert(ui):
139 139 '''check if the user's config requests nothing, a warning, or abort for
140 140 non-portable filenames'''
141 141 val = ui.config('ui', 'portablefilenames', 'warn')
142 142 lval = val.lower()
143 143 bval = util.parsebool(val)
144 144 abort = os.name == 'nt' or lval == 'abort'
145 145 warn = bval or lval == 'warn'
146 146 if bval is None and not (warn or abort or lval == 'ignore'):
147 147 raise error.ConfigError(
148 148 _("ui.portablefilenames value is invalid ('%s')") % val)
149 149 return abort, warn
150 150
151 151 class casecollisionauditor(object):
152 152 def __init__(self, ui, abort, dirstate):
153 153 self._ui = ui
154 154 self._abort = abort
155 155 allfiles = '\0'.join(dirstate._map)
156 156 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
157 157 self._dirstate = dirstate
158 158 # The purpose of _newfiles is so that we don't complain about
159 159 # case collisions if someone were to call this object with the
160 160 # same filename twice.
161 161 self._newfiles = set()
162 162
163 163 def __call__(self, f):
164 164 if f in self._newfiles:
165 165 return
166 166 fl = encoding.lower(f)
167 167 if fl in self._loweredfiles and f not in self._dirstate:
168 168 msg = _('possible case-folding collision for %s') % f
169 169 if self._abort:
170 170 raise util.Abort(msg)
171 171 self._ui.warn(_("warning: %s\n") % msg)
172 172 self._loweredfiles.add(fl)
173 173 self._newfiles.add(f)
174 174
175 175 class abstractvfs(object):
176 176 """Abstract base class; cannot be instantiated"""
177 177
178 178 def __init__(self, *args, **kwargs):
179 179 '''Prevent instantiation; don't call this from subclasses.'''
180 180 raise NotImplementedError('attempted instantiating ' + str(type(self)))
181 181
182 182 def tryread(self, path):
183 183 '''gracefully return an empty string for missing files'''
184 184 try:
185 185 return self.read(path)
186 186 except IOError, inst:
187 187 if inst.errno != errno.ENOENT:
188 188 raise
189 189 return ""
190 190
191 191 def tryreadlines(self, path, mode='rb'):
192 192 '''gracefully return an empty array for missing files'''
193 193 try:
194 194 return self.readlines(path, mode=mode)
195 195 except IOError, inst:
196 196 if inst.errno != errno.ENOENT:
197 197 raise
198 198 return []
199 199
200 def open(self, path, mode="r", text=False, atomictemp=False):
200 def open(self, path, mode="r", text=False, atomictemp=False,
201 notindexed=False):
202 '''Open ``path`` file, which is relative to vfs root.
203
204 Newly created directories are marked as "not to be indexed by
205 the content indexing service", if ``notindexed`` is specified
206 for "write" mode access.
207 '''
201 208 self.open = self.__call__
202 return self.__call__(path, mode, text, atomictemp)
209 return self.__call__(path, mode, text, atomictemp, notindexed)
203 210
204 211 def read(self, path):
205 212 fp = self(path, 'rb')
206 213 try:
207 214 return fp.read()
208 215 finally:
209 216 fp.close()
210 217
211 218 def readlines(self, path, mode='rb'):
212 219 fp = self(path, mode=mode)
213 220 try:
214 221 return fp.readlines()
215 222 finally:
216 223 fp.close()
217 224
218 225 def write(self, path, data):
219 226 fp = self(path, 'wb')
220 227 try:
221 228 return fp.write(data)
222 229 finally:
223 230 fp.close()
224 231
225 232 def append(self, path, data):
226 233 fp = self(path, 'ab')
227 234 try:
228 235 return fp.write(data)
229 236 finally:
230 237 fp.close()
231 238
232 239 def chmod(self, path, mode):
233 240 return os.chmod(self.join(path), mode)
234 241
235 242 def exists(self, path=None):
236 243 return os.path.exists(self.join(path))
237 244
238 245 def fstat(self, fp):
239 246 return util.fstat(fp)
240 247
241 248 def isdir(self, path=None):
242 249 return os.path.isdir(self.join(path))
243 250
244 251 def isfile(self, path=None):
245 252 return os.path.isfile(self.join(path))
246 253
247 254 def islink(self, path=None):
248 255 return os.path.islink(self.join(path))
249 256
250 257 def lexists(self, path=None):
251 258 return os.path.lexists(self.join(path))
252 259
253 260 def lstat(self, path=None):
254 261 return os.lstat(self.join(path))
255 262
256 263 def listdir(self, path=None):
257 264 return os.listdir(self.join(path))
258 265
259 266 def makedir(self, path=None, notindexed=True):
260 267 return util.makedir(self.join(path), notindexed)
261 268
262 269 def makedirs(self, path=None, mode=None):
263 270 return util.makedirs(self.join(path), mode)
264 271
265 272 def makelock(self, info, path):
266 273 return util.makelock(info, self.join(path))
267 274
268 275 def mkdir(self, path=None):
269 276 return os.mkdir(self.join(path))
270 277
271 278 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
272 279 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
273 280 dir=self.join(dir), text=text)
274 281 dname, fname = util.split(name)
275 282 if dir:
276 283 return fd, os.path.join(dir, fname)
277 284 else:
278 285 return fd, fname
279 286
280 287 def readdir(self, path=None, stat=None, skip=None):
281 288 return osutil.listdir(self.join(path), stat, skip)
282 289
283 290 def readlock(self, path):
284 291 return util.readlock(self.join(path))
285 292
286 293 def rename(self, src, dst):
287 294 return util.rename(self.join(src), self.join(dst))
288 295
289 296 def readlink(self, path):
290 297 return os.readlink(self.join(path))
291 298
292 299 def setflags(self, path, l, x):
293 300 return util.setflags(self.join(path), l, x)
294 301
295 302 def stat(self, path=None):
296 303 return os.stat(self.join(path))
297 304
298 305 def unlink(self, path=None):
299 306 return util.unlink(self.join(path))
300 307
301 308 def unlinkpath(self, path=None, ignoremissing=False):
302 309 return util.unlinkpath(self.join(path), ignoremissing)
303 310
304 311 def utime(self, path=None, t=None):
305 312 return os.utime(self.join(path), t)
306 313
307 314 class vfs(abstractvfs):
308 315 '''Operate files relative to a base directory
309 316
310 317 This class is used to hide the details of COW semantics and
311 318 remote file access from higher level code.
312 319 '''
313 320 def __init__(self, base, audit=True, expandpath=False, realpath=False):
314 321 if expandpath:
315 322 base = util.expandpath(base)
316 323 if realpath:
317 324 base = os.path.realpath(base)
318 325 self.base = base
319 326 self._setmustaudit(audit)
320 327 self.createmode = None
321 328 self._trustnlink = None
322 329
323 330 def _getmustaudit(self):
324 331 return self._audit
325 332
326 333 def _setmustaudit(self, onoff):
327 334 self._audit = onoff
328 335 if onoff:
329 336 self.audit = pathutil.pathauditor(self.base)
330 337 else:
331 338 self.audit = util.always
332 339
333 340 mustaudit = property(_getmustaudit, _setmustaudit)
334 341
335 342 @util.propertycache
336 343 def _cansymlink(self):
337 344 return util.checklink(self.base)
338 345
339 346 @util.propertycache
340 347 def _chmod(self):
341 348 return util.checkexec(self.base)
342 349
343 350 def _fixfilemode(self, name):
344 351 if self.createmode is None or not self._chmod:
345 352 return
346 353 os.chmod(name, self.createmode & 0666)
347 354
348 def __call__(self, path, mode="r", text=False, atomictemp=False):
355 def __call__(self, path, mode="r", text=False, atomictemp=False,
356 notindexed=False):
357 '''Open ``path`` file, which is relative to vfs root.
358
359 Newly created directories are marked as "not to be indexed by
360 the content indexing service", if ``notindexed`` is specified
361 for "write" mode access.
362 '''
349 363 if self._audit:
350 364 r = util.checkosfilename(path)
351 365 if r:
352 366 raise util.Abort("%s: %r" % (r, path))
353 367 self.audit(path)
354 368 f = self.join(path)
355 369
356 370 if not text and "b" not in mode:
357 371 mode += "b" # for that other OS
358 372
359 373 nlink = -1
360 374 if mode not in ('r', 'rb'):
361 375 dirname, basename = util.split(f)
362 376 # If basename is empty, then the path is malformed because it points
363 377 # to a directory. Let the posixfile() call below raise IOError.
364 378 if basename:
365 379 if atomictemp:
366 util.ensuredirs(dirname, self.createmode)
380 util.ensuredirs(dirname, self.createmode, notindexed)
367 381 return util.atomictempfile(f, mode, self.createmode)
368 382 try:
369 383 if 'w' in mode:
370 384 util.unlink(f)
371 385 nlink = 0
372 386 else:
373 387 # nlinks() may behave differently for files on Windows
374 388 # shares if the file is open.
375 389 fd = util.posixfile(f)
376 390 nlink = util.nlinks(f)
377 391 if nlink < 1:
378 392 nlink = 2 # force mktempcopy (issue1922)
379 393 fd.close()
380 394 except (OSError, IOError), e:
381 395 if e.errno != errno.ENOENT:
382 396 raise
383 397 nlink = 0
384 util.ensuredirs(dirname, self.createmode)
398 util.ensuredirs(dirname, self.createmode, notindexed)
385 399 if nlink > 0:
386 400 if self._trustnlink is None:
387 401 self._trustnlink = nlink > 1 or util.checknlink(f)
388 402 if nlink > 1 or not self._trustnlink:
389 403 util.rename(util.mktempcopy(f), f)
390 404 fp = util.posixfile(f, mode)
391 405 if nlink == 0:
392 406 self._fixfilemode(f)
393 407 return fp
394 408
395 409 def symlink(self, src, dst):
396 410 self.audit(dst)
397 411 linkname = self.join(dst)
398 412 try:
399 413 os.unlink(linkname)
400 414 except OSError:
401 415 pass
402 416
403 417 util.ensuredirs(os.path.dirname(linkname), self.createmode)
404 418
405 419 if self._cansymlink:
406 420 try:
407 421 os.symlink(src, linkname)
408 422 except OSError, err:
409 423 raise OSError(err.errno, _('could not symlink to %r: %s') %
410 424 (src, err.strerror), linkname)
411 425 else:
412 426 self.write(dst, src)
413 427
414 428 def join(self, path):
415 429 if path:
416 430 return os.path.join(self.base, path)
417 431 else:
418 432 return self.base
419 433
420 434 opener = vfs
421 435
422 436 class auditvfs(object):
423 437 def __init__(self, vfs):
424 438 self.vfs = vfs
425 439
426 440 def _getmustaudit(self):
427 441 return self.vfs.mustaudit
428 442
429 443 def _setmustaudit(self, onoff):
430 444 self.vfs.mustaudit = onoff
431 445
432 446 mustaudit = property(_getmustaudit, _setmustaudit)
433 447
434 448 class filtervfs(abstractvfs, auditvfs):
435 449 '''Wrapper vfs for filtering filenames with a function.'''
436 450
437 451 def __init__(self, vfs, filter):
438 452 auditvfs.__init__(self, vfs)
439 453 self._filter = filter
440 454
441 455 def __call__(self, path, *args, **kwargs):
442 456 return self.vfs(self._filter(path), *args, **kwargs)
443 457
444 458 def join(self, path):
445 459 if path:
446 460 return self.vfs.join(self._filter(path))
447 461 else:
448 462 return self.vfs.join(path)
449 463
450 464 filteropener = filtervfs
451 465
452 466 class readonlyvfs(abstractvfs, auditvfs):
453 467 '''Wrapper vfs preventing any writing.'''
454 468
455 469 def __init__(self, vfs):
456 470 auditvfs.__init__(self, vfs)
457 471
458 472 def __call__(self, path, mode='r', *args, **kw):
459 473 if mode not in ('r', 'rb'):
460 474 raise util.Abort('this vfs is read only')
461 475 return self.vfs(path, mode, *args, **kw)
462 476
463 477
464 478 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
465 479 '''yield every hg repository under path, always recursively.
466 480 The recurse flag will only control recursion into repo working dirs'''
467 481 def errhandler(err):
468 482 if err.filename == path:
469 483 raise err
470 484 samestat = getattr(os.path, 'samestat', None)
471 485 if followsym and samestat is not None:
472 486 def adddir(dirlst, dirname):
473 487 match = False
474 488 dirstat = os.stat(dirname)
475 489 for lstdirstat in dirlst:
476 490 if samestat(dirstat, lstdirstat):
477 491 match = True
478 492 break
479 493 if not match:
480 494 dirlst.append(dirstat)
481 495 return not match
482 496 else:
483 497 followsym = False
484 498
485 499 if (seen_dirs is None) and followsym:
486 500 seen_dirs = []
487 501 adddir(seen_dirs, path)
488 502 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
489 503 dirs.sort()
490 504 if '.hg' in dirs:
491 505 yield root # found a repository
492 506 qroot = os.path.join(root, '.hg', 'patches')
493 507 if os.path.isdir(os.path.join(qroot, '.hg')):
494 508 yield qroot # we have a patch queue repo here
495 509 if recurse:
496 510 # avoid recursing inside the .hg directory
497 511 dirs.remove('.hg')
498 512 else:
499 513 dirs[:] = [] # don't descend further
500 514 elif followsym:
501 515 newdirs = []
502 516 for d in dirs:
503 517 fname = os.path.join(root, d)
504 518 if adddir(seen_dirs, fname):
505 519 if os.path.islink(fname):
506 520 for hgname in walkrepos(fname, True, seen_dirs):
507 521 yield hgname
508 522 else:
509 523 newdirs.append(d)
510 524 dirs[:] = newdirs
511 525
512 526 def osrcpath():
513 527 '''return default os-specific hgrc search path'''
514 528 path = []
515 529 defaultpath = os.path.join(util.datapath, 'default.d')
516 530 if os.path.isdir(defaultpath):
517 531 for f, kind in osutil.listdir(defaultpath):
518 532 if f.endswith('.rc'):
519 533 path.append(os.path.join(defaultpath, f))
520 534 path.extend(systemrcpath())
521 535 path.extend(userrcpath())
522 536 path = [os.path.normpath(f) for f in path]
523 537 return path
524 538
525 539 _rcpath = None
526 540
527 541 def rcpath():
528 542 '''return hgrc search path. if env var HGRCPATH is set, use it.
529 543 for each item in path, if directory, use files ending in .rc,
530 544 else use item.
531 545 make HGRCPATH empty to only look in .hg/hgrc of current repo.
532 546 if no HGRCPATH, use default os-specific path.'''
533 547 global _rcpath
534 548 if _rcpath is None:
535 549 if 'HGRCPATH' in os.environ:
536 550 _rcpath = []
537 551 for p in os.environ['HGRCPATH'].split(os.pathsep):
538 552 if not p:
539 553 continue
540 554 p = util.expandpath(p)
541 555 if os.path.isdir(p):
542 556 for f, kind in osutil.listdir(p):
543 557 if f.endswith('.rc'):
544 558 _rcpath.append(os.path.join(p, f))
545 559 else:
546 560 _rcpath.append(p)
547 561 else:
548 562 _rcpath = osrcpath()
549 563 return _rcpath
550 564
551 565 def revsingle(repo, revspec, default='.'):
552 566 if not revspec and revspec != 0:
553 567 return repo[default]
554 568
555 569 l = revrange(repo, [revspec])
556 570 if not l:
557 571 raise util.Abort(_('empty revision set'))
558 572 return repo[l.last()]
559 573
560 574 def revpair(repo, revs):
561 575 if not revs:
562 576 return repo.dirstate.p1(), None
563 577
564 578 l = revrange(repo, revs)
565 579
566 580 if not l:
567 581 first = second = None
568 582 elif l.isascending():
569 583 first = l.min()
570 584 second = l.max()
571 585 elif l.isdescending():
572 586 first = l.max()
573 587 second = l.min()
574 588 else:
575 589 first = l.first()
576 590 second = l.last()
577 591
578 592 if first is None:
579 593 raise util.Abort(_('empty revision range'))
580 594
581 595 if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
582 596 return repo.lookup(first), None
583 597
584 598 return repo.lookup(first), repo.lookup(second)
585 599
586 600 _revrangesep = ':'
587 601
588 602 def revrange(repo, revs):
589 603 """Yield revision as strings from a list of revision specifications."""
590 604
591 605 def revfix(repo, val, defval):
592 606 if not val and val != 0 and defval is not None:
593 607 return defval
594 608 return repo[val].rev()
595 609
596 610 seen, l = set(), revset.baseset([])
597 611 for spec in revs:
598 612 if l and not seen:
599 613 seen = set(l)
600 614 # attempt to parse old-style ranges first to deal with
601 615 # things like old-tag which contain query metacharacters
602 616 try:
603 617 if isinstance(spec, int):
604 618 seen.add(spec)
605 619 l = l + revset.baseset([spec])
606 620 continue
607 621
608 622 if _revrangesep in spec:
609 623 start, end = spec.split(_revrangesep, 1)
610 624 start = revfix(repo, start, 0)
611 625 end = revfix(repo, end, len(repo) - 1)
612 626 if end == nullrev and start < 0:
613 627 start = nullrev
614 628 rangeiter = repo.changelog.revs(start, end)
615 629 if not seen and not l:
616 630 # by far the most common case: revs = ["-1:0"]
617 631 l = revset.baseset(rangeiter)
618 632 # defer syncing seen until next iteration
619 633 continue
620 634 newrevs = set(rangeiter)
621 635 if seen:
622 636 newrevs.difference_update(seen)
623 637 seen.update(newrevs)
624 638 else:
625 639 seen = newrevs
626 640 l = l + revset.baseset(sorted(newrevs, reverse=start > end))
627 641 continue
628 642 elif spec and spec in repo: # single unquoted rev
629 643 rev = revfix(repo, spec, None)
630 644 if rev in seen:
631 645 continue
632 646 seen.add(rev)
633 647 l = l + revset.baseset([rev])
634 648 continue
635 649 except error.RepoLookupError:
636 650 pass
637 651
638 652 # fall through to new-style queries if old-style fails
639 653 m = revset.match(repo.ui, spec, repo)
640 654 if seen or l:
641 655 dl = [r for r in m(repo, revset.spanset(repo)) if r not in seen]
642 656 l = l + revset.baseset(dl)
643 657 seen.update(dl)
644 658 else:
645 659 l = m(repo, revset.spanset(repo))
646 660
647 661 return l
648 662
649 663 def expandpats(pats):
650 664 '''Expand bare globs when running on windows.
651 665 On posix we assume it already has already been done by sh.'''
652 666 if not util.expandglobs:
653 667 return list(pats)
654 668 ret = []
655 669 for kindpat in pats:
656 670 kind, pat = matchmod._patsplit(kindpat, None)
657 671 if kind is None:
658 672 try:
659 673 globbed = glob.glob(pat)
660 674 except re.error:
661 675 globbed = [pat]
662 676 if globbed:
663 677 ret.extend(globbed)
664 678 continue
665 679 ret.append(kindpat)
666 680 return ret
667 681
668 682 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
669 683 '''Return a matcher and the patterns that were used.
670 684 The matcher will warn about bad matches.'''
671 685 if pats == ("",):
672 686 pats = []
673 687 if not globbed and default == 'relpath':
674 688 pats = expandpats(pats or [])
675 689
676 690 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
677 691 default)
678 692 def badfn(f, msg):
679 693 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
680 694 m.bad = badfn
681 695 return m, pats
682 696
683 697 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
684 698 '''Return a matcher that will warn about bad matches.'''
685 699 return matchandpats(ctx, pats, opts, globbed, default)[0]
686 700
687 701 def matchall(repo):
688 702 '''Return a matcher that will efficiently match everything.'''
689 703 return matchmod.always(repo.root, repo.getcwd())
690 704
691 705 def matchfiles(repo, files):
692 706 '''Return a matcher that will efficiently match exactly these files.'''
693 707 return matchmod.exact(repo.root, repo.getcwd(), files)
694 708
695 709 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
696 710 if dry_run is None:
697 711 dry_run = opts.get('dry_run')
698 712 if similarity is None:
699 713 similarity = float(opts.get('similarity') or 0)
700 714 # we'd use status here, except handling of symlinks and ignore is tricky
701 715 m = match(repo[None], pats, opts)
702 716 rejected = []
703 717 m.bad = lambda x, y: rejected.append(x)
704 718
705 719 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
706 720
707 721 unknownset = set(unknown + forgotten)
708 722 toprint = unknownset.copy()
709 723 toprint.update(deleted)
710 724 for abs in sorted(toprint):
711 725 if repo.ui.verbose or not m.exact(abs):
712 726 rel = m.rel(abs)
713 727 if abs in unknownset:
714 728 status = _('adding %s\n') % ((pats and rel) or abs)
715 729 else:
716 730 status = _('removing %s\n') % ((pats and rel) or abs)
717 731 repo.ui.status(status)
718 732
719 733 renames = _findrenames(repo, m, added + unknown, removed + deleted,
720 734 similarity)
721 735
722 736 if not dry_run:
723 737 _markchanges(repo, unknown + forgotten, deleted, renames)
724 738
725 739 for f in rejected:
726 740 if f in m.files():
727 741 return 1
728 742 return 0
729 743
730 744 def marktouched(repo, files, similarity=0.0):
731 745 '''Assert that files have somehow been operated upon. files are relative to
732 746 the repo root.'''
733 747 m = matchfiles(repo, files)
734 748 rejected = []
735 749 m.bad = lambda x, y: rejected.append(x)
736 750
737 751 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
738 752
739 753 if repo.ui.verbose:
740 754 unknownset = set(unknown + forgotten)
741 755 toprint = unknownset.copy()
742 756 toprint.update(deleted)
743 757 for abs in sorted(toprint):
744 758 if abs in unknownset:
745 759 status = _('adding %s\n') % abs
746 760 else:
747 761 status = _('removing %s\n') % abs
748 762 repo.ui.status(status)
749 763
750 764 renames = _findrenames(repo, m, added + unknown, removed + deleted,
751 765 similarity)
752 766
753 767 _markchanges(repo, unknown + forgotten, deleted, renames)
754 768
755 769 for f in rejected:
756 770 if f in m.files():
757 771 return 1
758 772 return 0
759 773
760 774 def _interestingfiles(repo, matcher):
761 775 '''Walk dirstate with matcher, looking for files that addremove would care
762 776 about.
763 777
764 778 This is different from dirstate.status because it doesn't care about
765 779 whether files are modified or clean.'''
766 780 added, unknown, deleted, removed, forgotten = [], [], [], [], []
767 781 audit_path = pathutil.pathauditor(repo.root)
768 782
769 783 ctx = repo[None]
770 784 dirstate = repo.dirstate
771 785 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
772 786 full=False)
773 787 for abs, st in walkresults.iteritems():
774 788 dstate = dirstate[abs]
775 789 if dstate == '?' and audit_path.check(abs):
776 790 unknown.append(abs)
777 791 elif dstate != 'r' and not st:
778 792 deleted.append(abs)
779 793 elif dstate == 'r' and st:
780 794 forgotten.append(abs)
781 795 # for finding renames
782 796 elif dstate == 'r' and not st:
783 797 removed.append(abs)
784 798 elif dstate == 'a':
785 799 added.append(abs)
786 800
787 801 return added, unknown, deleted, removed, forgotten
788 802
789 803 def _findrenames(repo, matcher, added, removed, similarity):
790 804 '''Find renames from removed files to added ones.'''
791 805 renames = {}
792 806 if similarity > 0:
793 807 for old, new, score in similar.findrenames(repo, added, removed,
794 808 similarity):
795 809 if (repo.ui.verbose or not matcher.exact(old)
796 810 or not matcher.exact(new)):
797 811 repo.ui.status(_('recording removal of %s as rename to %s '
798 812 '(%d%% similar)\n') %
799 813 (matcher.rel(old), matcher.rel(new),
800 814 score * 100))
801 815 renames[new] = old
802 816 return renames
803 817
804 818 def _markchanges(repo, unknown, deleted, renames):
805 819 '''Marks the files in unknown as added, the files in deleted as removed,
806 820 and the files in renames as copied.'''
807 821 wctx = repo[None]
808 822 wlock = repo.wlock()
809 823 try:
810 824 wctx.forget(deleted)
811 825 wctx.add(unknown)
812 826 for new, old in renames.iteritems():
813 827 wctx.copy(old, new)
814 828 finally:
815 829 wlock.release()
816 830
817 831 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
818 832 """Update the dirstate to reflect the intent of copying src to dst. For
819 833 different reasons it might not end with dst being marked as copied from src.
820 834 """
821 835 origsrc = repo.dirstate.copied(src) or src
822 836 if dst == origsrc: # copying back a copy?
823 837 if repo.dirstate[dst] not in 'mn' and not dryrun:
824 838 repo.dirstate.normallookup(dst)
825 839 else:
826 840 if repo.dirstate[origsrc] == 'a' and origsrc == src:
827 841 if not ui.quiet:
828 842 ui.warn(_("%s has not been committed yet, so no copy "
829 843 "data will be stored for %s.\n")
830 844 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
831 845 if repo.dirstate[dst] in '?r' and not dryrun:
832 846 wctx.add([dst])
833 847 elif not dryrun:
834 848 wctx.copy(origsrc, dst)
835 849
836 850 def readrequires(opener, supported):
837 851 '''Reads and parses .hg/requires and checks if all entries found
838 852 are in the list of supported features.'''
839 853 requirements = set(opener.read("requires").splitlines())
840 854 missings = []
841 855 for r in requirements:
842 856 if r not in supported:
843 857 if not r or not r[0].isalnum():
844 858 raise error.RequirementError(_(".hg/requires file is corrupt"))
845 859 missings.append(r)
846 860 missings.sort()
847 861 if missings:
848 862 raise error.RequirementError(
849 863 _("repository requires features unknown to this Mercurial: %s")
850 864 % " ".join(missings),
851 865 hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
852 866 " for more information"))
853 867 return requirements
854 868
855 869 class filecachesubentry(object):
856 870 def __init__(self, path, stat):
857 871 self.path = path
858 872 self.cachestat = None
859 873 self._cacheable = None
860 874
861 875 if stat:
862 876 self.cachestat = filecachesubentry.stat(self.path)
863 877
864 878 if self.cachestat:
865 879 self._cacheable = self.cachestat.cacheable()
866 880 else:
867 881 # None means we don't know yet
868 882 self._cacheable = None
869 883
870 884 def refresh(self):
871 885 if self.cacheable():
872 886 self.cachestat = filecachesubentry.stat(self.path)
873 887
874 888 def cacheable(self):
875 889 if self._cacheable is not None:
876 890 return self._cacheable
877 891
878 892 # we don't know yet, assume it is for now
879 893 return True
880 894
881 895 def changed(self):
882 896 # no point in going further if we can't cache it
883 897 if not self.cacheable():
884 898 return True
885 899
886 900 newstat = filecachesubentry.stat(self.path)
887 901
888 902 # we may not know if it's cacheable yet, check again now
889 903 if newstat and self._cacheable is None:
890 904 self._cacheable = newstat.cacheable()
891 905
892 906 # check again
893 907 if not self._cacheable:
894 908 return True
895 909
896 910 if self.cachestat != newstat:
897 911 self.cachestat = newstat
898 912 return True
899 913 else:
900 914 return False
901 915
902 916 @staticmethod
903 917 def stat(path):
904 918 try:
905 919 return util.cachestat(path)
906 920 except OSError, e:
907 921 if e.errno != errno.ENOENT:
908 922 raise
909 923
910 924 class filecacheentry(object):
911 925 def __init__(self, paths, stat=True):
912 926 self._entries = []
913 927 for path in paths:
914 928 self._entries.append(filecachesubentry(path, stat))
915 929
916 930 def changed(self):
917 931 '''true if any entry has changed'''
918 932 for entry in self._entries:
919 933 if entry.changed():
920 934 return True
921 935 return False
922 936
923 937 def refresh(self):
924 938 for entry in self._entries:
925 939 entry.refresh()
926 940
927 941 class filecache(object):
928 942 '''A property like decorator that tracks files under .hg/ for updates.
929 943
930 944 Records stat info when called in _filecache.
931 945
932 946 On subsequent calls, compares old stat info with new info, and recreates the
933 947 object when any of the files changes, updating the new stat info in
934 948 _filecache.
935 949
936 950 Mercurial either atomic renames or appends for files under .hg,
937 951 so to ensure the cache is reliable we need the filesystem to be able
938 952 to tell us if a file has been replaced. If it can't, we fallback to
939 953 recreating the object on every call (essentially the same behaviour as
940 954 propertycache).
941 955
942 956 '''
943 957 def __init__(self, *paths):
944 958 self.paths = paths
945 959
946 960 def join(self, obj, fname):
947 961 """Used to compute the runtime path of a cached file.
948 962
949 963 Users should subclass filecache and provide their own version of this
950 964 function to call the appropriate join function on 'obj' (an instance
951 965 of the class that its member function was decorated).
952 966 """
953 967 return obj.join(fname)
954 968
955 969 def __call__(self, func):
956 970 self.func = func
957 971 self.name = func.__name__
958 972 return self
959 973
960 974 def __get__(self, obj, type=None):
961 975 # do we need to check if the file changed?
962 976 if self.name in obj.__dict__:
963 977 assert self.name in obj._filecache, self.name
964 978 return obj.__dict__[self.name]
965 979
966 980 entry = obj._filecache.get(self.name)
967 981
968 982 if entry:
969 983 if entry.changed():
970 984 entry.obj = self.func(obj)
971 985 else:
972 986 paths = [self.join(obj, path) for path in self.paths]
973 987
974 988 # We stat -before- creating the object so our cache doesn't lie if
975 989 # a writer modified between the time we read and stat
976 990 entry = filecacheentry(paths, True)
977 991 entry.obj = self.func(obj)
978 992
979 993 obj._filecache[self.name] = entry
980 994
981 995 obj.__dict__[self.name] = entry.obj
982 996 return entry.obj
983 997
984 998 def __set__(self, obj, value):
985 999 if self.name not in obj._filecache:
986 1000 # we add an entry for the missing value because X in __dict__
987 1001 # implies X in _filecache
988 1002 paths = [self.join(obj, path) for path in self.paths]
989 1003 ce = filecacheentry(paths, False)
990 1004 obj._filecache[self.name] = ce
991 1005 else:
992 1006 ce = obj._filecache[self.name]
993 1007
994 1008 ce.obj = value # update cached copy
995 1009 obj.__dict__[self.name] = value # update copy returned by obj.x
996 1010
997 1011 def __delete__(self, obj):
998 1012 try:
999 1013 del obj.__dict__[self.name]
1000 1014 except KeyError:
1001 1015 raise AttributeError(self.name)
1002 1016
1003 1017 class dirs(object):
1004 1018 '''a multiset of directory names from a dirstate or manifest'''
1005 1019
1006 1020 def __init__(self, map, skip=None):
1007 1021 self._dirs = {}
1008 1022 addpath = self.addpath
1009 1023 if util.safehasattr(map, 'iteritems') and skip is not None:
1010 1024 for f, s in map.iteritems():
1011 1025 if s[0] != skip:
1012 1026 addpath(f)
1013 1027 else:
1014 1028 for f in map:
1015 1029 addpath(f)
1016 1030
1017 1031 def addpath(self, path):
1018 1032 dirs = self._dirs
1019 1033 for base in finddirs(path):
1020 1034 if base in dirs:
1021 1035 dirs[base] += 1
1022 1036 return
1023 1037 dirs[base] = 1
1024 1038
1025 1039 def delpath(self, path):
1026 1040 dirs = self._dirs
1027 1041 for base in finddirs(path):
1028 1042 if dirs[base] > 1:
1029 1043 dirs[base] -= 1
1030 1044 return
1031 1045 del dirs[base]
1032 1046
1033 1047 def __iter__(self):
1034 1048 return self._dirs.iterkeys()
1035 1049
1036 1050 def __contains__(self, d):
1037 1051 return d in self._dirs
1038 1052
1039 1053 if util.safehasattr(parsers, 'dirs'):
1040 1054 dirs = parsers.dirs
1041 1055
1042 1056 def finddirs(path):
1043 1057 pos = path.rfind('/')
1044 1058 while pos != -1:
1045 1059 yield path[:pos]
1046 1060 pos = path.rfind('/', 0, pos)
@@ -1,2186 +1,2191
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 import i18n
17 17 _ = i18n._
18 18 import error, osutil, encoding
19 19 import errno, shutil, sys, tempfile, traceback
20 20 import re as remod
21 21 import os, time, datetime, calendar, textwrap, signal, collections
22 22 import imp, socket, urllib
23 23
24 24 if os.name == 'nt':
25 25 import windows as platform
26 26 else:
27 27 import posix as platform
28 28
# Re-export the platform-specific implementations (posix or windows,
# selected above) under stable module-level names so that the rest of
# the code never has to care which platform module was imported.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the C implementation from osutil when it exists
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
78 78
79 79 # Python compatibility
80 80
_notset = object()

def safehasattr(thing, attr):
    '''hasattr() replacement that only swallows a missing attribute.

    Unlike Python 2's built-in hasattr, which hides arbitrary exceptions
    raised while computing the attribute, this suppresses nothing but
    AttributeError.
    '''
    try:
        getattr(thing, attr)
    except AttributeError:
        return False
    return True
85 85
def sha1(s=''):
    '''
    Low-overhead wrapper around Python's SHA support

    >>> f = _fastsha1
    >>> a = sha1()
    >>> a = f()
    >>> a.hexdigest()
    'da39a3ee5e6b4b0d3255bfef95601890afd80709'
    '''

    # _fastsha1 rebinds the module-level name `sha1` on its first call,
    # so this wrapper is only ever executed once
    return _fastsha1(s)
98 98
def _fastsha1(s=''):
    # This function will import sha1 from hashlib or sha (whichever is
    # available) and overwrite itself with it on the first call.
    # Subsequent calls will go directly to the imported function.
    if sys.version_info >= (2, 5):
        from hashlib import sha1 as _sha1
    else:
        # Python 2.4 has no hashlib; fall back to the old sha module
        from sha import sha as _sha1
    global _fastsha1, sha1
    _fastsha1 = sha1 = _sha1
    return _sha1(s)
110 110
def md5(s=''):
    '''Self-replacing lazy import of the md5 constructor.

    Like _fastsha1, the module-level name `md5` is rebound to the real
    constructor on the first call.
    '''
    try:
        from hashlib import md5 as _md5
    except ImportError:
        # pre-2.5 fallback
        from md5 import md5 as _md5
    global md5
    md5 = _md5
    return _md5(s)
119 119
# digest name -> constructor returning a hashlib-style object
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha1', 'md5']

try:
    # hashlib is only available from Python 2.5 on; register sha512
    # when we have it
    import hashlib
    DIGESTS.update({
        'sha512': hashlib.sha512,
    })
    DIGESTS_BY_STRENGTH.insert(0, 'sha512')
except ImportError:
    pass

# sanity check: the strength ordering must only name known digests
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
138 138
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        '''feed data into every tracked digest'''
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        '''return the hex digest for `key`; Abort on unknown digest type'''
        if key not in DIGESTS:
            # bug fix: this used to interpolate the undefined name 'k'
            # (a leftover from __init__'s loop variable), raising
            # NameError instead of the intended Abort message
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
185 185
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size            # expected total byte count
        self._got = 0                # bytes actually read so far
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        '''read from the wrapped handle, feeding the digests as we go'''
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        '''raise Abort unless size and every digest match expectations'''
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))
217 217
# make a module-level `buffer` available everywhere: the builtin on
# Python 2, a slicing shim elsewhere
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            # copying slice: adequate where the builtin is missing
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            # zero-copy view on Python 3
            return memoryview(sliceable)[offset:]
227 227
import subprocess
# value passed as close_fds to the subprocess.Popen calls below; only
# enabled on POSIX
closefds = os.name == 'posix'
230 230
def popen2(cmd, env=None, newlines=False):
    '''Run cmd through the shell; return its (stdin, stdout) pipe pair.'''
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout
241 241
def popen3(cmd, env=None, newlines=False):
    '''Like popen4, but return only the (stdin, stdout, stderr) pipes.'''
    stdin, stdout, stderr, p = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
245 245
def popen4(cmd, env=None, newlines=False):
    '''Run cmd through the shell; return (stdin, stdout, stderr, proc).'''
    # see popen2 for the bufsize=-1 rationale
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout, p.stderr, p
254 254
def version():
    """Return version information if available."""
    try:
        # __version__ is generated at build time; absent in a raw checkout
        import __version__
        return __version__.version
    except ImportError:
        return 'unknown'
262 262
# used by parsedate
# formats tried in order when parsing a user-supplied date string
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# additional coarser formats (year/month only) accepted in range queries
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
297 297
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # NOTE: func.func_code is Python 2 only; the cache is unbounded
    if func.func_code.co_argcount == 0:
        # zero-argument function: a one-slot list is the whole cache
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.func_code.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
323 323
# module-level `deque` with a guaranteed remove() method
try:
    collections.deque.remove
    deque = collections.deque
except AttributeError:
    # python 2.4 lacks deque.remove
    class deque(collections.deque):
        def remove(self, val):
            # linear scan removing the first occurrence, mirroring the
            # 2.5+ built-in behavior
            for i, v in enumerate(self):
                if v == val:
                    del self[i]
                    break
335 335
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order.  Re-assigning an existing key
    moves it to the end, as if freshly inserted.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # re-insertion moves the key to the end of the order
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        for k in src:
            self[k] = src[k]
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # bug fix: dict.pop must return the popped value (or the given
        # default); the previous implementation silently returned None
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was supplied
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place key at an explicit position in the iteration order
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
378 378
class lrucachedict(object):
    '''cache most recent gets from or sets to this dictionary'''
    def __init__(self, maxsize):
        self._cache = {}
        self._maxsize = maxsize
        # recency order: leftmost entry is the least recently used
        self._order = deque()

    def __getitem__(self, key):
        # a successful read refreshes the key's recency
        value = self._cache[key]
        self._order.remove(key)
        self._order.append(key)
        return value

    def __setitem__(self, key, value):
        if key not in self._cache:
            if len(self._cache) >= self._maxsize:
                # evict the least recently used entry
                del self._cache[self._order.popleft()]
        else:
            self._order.remove(key)
        self._cache[key] = value
        self._order.append(key)

    def __contains__(self, key):
        return key in self._cache

    def clear(self):
        self._cache.clear()
        self._order = deque()
407 407
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    # bounded (~20 entries) LRU variant of cachefunc; func.func_code is
    # Python 2 only
    cache = {}
    order = deque()
    if func.func_code.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
434 434
class propertycache(object):
    '''Descriptor caching the wrapped function's result per instance.

    On first access the wrapped function runs once and its result is
    stored in the instance __dict__ under the attribute's own name, so
    subsequent lookups bypass the descriptor entirely.
    '''
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        '''Record value so plain attribute lookup finds it first.'''
        # write through __dict__ to bypass any __setattr__ override
        # (eg: repoview)
        obj.__dict__[self.name] = value
447 447
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    # stderr is not captured; only stdout is returned
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    return pout
454 454
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write the input to a temp file for the command to read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # reserve an output file name; the command writes into it
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS an odd status means success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        # best-effort cleanup of both temp files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
491 491
# filter-spec prefix -> implementation used by filter() below
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
496 496
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on a known prefix ('tempfile:' / 'pipe:'); a bare command
    # defaults to a pipe
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)
503 503
def binary(s):
    """return true if a string is binary data"""
    # heuristic: a NUL byte marks binary content; empty/None is not binary
    if not s:
        return False
    return '\0' in s
507 507
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max

    NOTE: the parameters deliberately shadow the builtins min/max; they
    are part of the public keyword interface.
    '''
    def log2(x):
        # floor(log2(x)) via bit counting; log2(0) == 0 by convention here
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                # grow the threshold: at least double it, and jump
                # directly to the magnitude of what we actually buffered
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        # trailing partial chunk
        yield ''.join(buf)
538 538
# re-export for convenience; most of this module raises Abort directly
Abort = error.Abort

def always(fn):
    '''matcher predicate that accepts everything'''
    return True

def never(fn):
    '''matcher predicate that rejects everything'''
    return False
546 546
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives (Windows): no relative path exists
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    # strip the common prefix of both paths, then climb out of what is
    # left of n1 ('..' per remaining component) and descend into n2
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'
572 572
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    # any of the three markers indicates a frozen interpreter
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze
582 582
# the location of data files matching the source code
if mainfrozen():
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# hand the computed location to the i18n layer
i18n.setdatapath(datapath)
591 591
# lazily-resolved path of the 'hg' executable; filled by hgexecutable()
_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # explicit override via the HG environment variable
            _sethgexecutable(hg)
        elif mainfrozen():
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # running from the 'hg' script itself
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable

def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path
617 617
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or out == sys.__stdout__:
            # child may write straight to our stdout
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # relay child output line by line into the given writer
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # on OpenVMS an odd status means success
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
674 674
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback of depth 1 means the TypeError came from the
            # call itself (bad signature), not from inside func
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
686 686
def copyfile(src, dest):
    "copy a file, preserving mode and atime/mtime"
    if os.path.lexists(dest):
        # replace whatever is there, including dangling symlinks
        unlink(dest)
    if os.path.islink(src):
        # recreate the symlink rather than copying its target
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)
        except shutil.Error, inst:
            raise Abort(str(inst))
699 699
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible

    Returns (hardlink, num): whether hardlinking is still viable, and
    how many files were copied/linked.
    """

    if hardlink is None:
        # hardlinks only work within a single device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    num = 0
    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            # a failed link attempt below disables hardlinking for the
            # rest of the tree via the returned flag
            hardlink, n = copyfiles(srcname, dstname, hardlink)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1

    return hardlink, num
727 727
# base file names and characters Windows refuses in any path component
_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # validate each path component individually
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                # control characters are forbidden
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # reserved device names apply to the part before the first dot
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # trailing dot/space is rejected, but '.' and '..' stay allowed
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
778 778
# on Windows every filename must pass the Windows rules; elsewhere the
# platform module decides
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
783 783
def makelock(info, pathname):
    '''Create a lock file at pathname holding info.

    Preferred form is a symlink whose target encodes info; EEXIST means
    the lock is already held and is re-raised.  Platforms without
    symlinks fall back to an exclusively-created regular file.
    '''
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    # fallback: O_EXCL guarantees we fail if the lock already exists
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
796 796
def readlock(pathname):
    '''Read the info stored in a lock file created by makelock.'''
    try:
        return os.readlink(pathname)
    except OSError, why:
        # EINVAL: not a symlink (regular-file fallback lock);
        # ENOSYS: symlinks unsupported on this system
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
809 809
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    fileno = getattr(fp, 'fileno', None)
    if fileno is None:
        # no descriptor available: fall back to stat'ing the path
        return os.stat(fp.name)
    return os.fstat(fileno())
816 816
817 817 # File system features
818 818
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    # flip the case of the final component and stat that variant
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.stat(p2)
        # identical stat result means both spellings name the same file:
        # the filesystem folds case
        if s2 == s1:
            return False
        return True
    except OSError:
        # case-flipped name does not exist: filesystem is case-sensitive
        return True
841 841
# optional re2 engine: _re2 is None until probed, False when unusable
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
847 847
class _re(object):
    '''Facade choosing between the re2 engine and the stdlib re module.'''

    def _checkre2(self):
        # probe once whether the imported re2 module actually works;
        # result is cached in the module-level _re2 flag
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline rather than as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern not supported by re2; fall back to stdlib re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton used as util.re.compile / util.re.escape
re = _re()
892 892
# directory path -> {normcased name: on-disk name}; see fspath below
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # bug fix: the result of str.replace was previously discarded, so on
    # Windows the backslash was never escaped inside the character class
    # below and '\' was not recognized as a separator
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through verbatim
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
935 935
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        try:
            oslink(f1, f2)
        except OSError:
            # hardlinks not supported here at all
            return False

        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    finally:
        # always remove both probe files, whatever happened above
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
969 969
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # os.altsep is None on POSIX; keep the original truthy/falsy result
    return os.altsep and path.endswith(os.altsep)
973 973
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    separator = os.sep
    return path.split(separator)
981 981
def gui():
    '''Are we running in a GUI?'''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        # elsewhere: Windows always counts as GUI, X11 needs DISPLAY
        return os.name == "nt" or os.environ.get("DISPLAY")
996 996
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # create the temp file in the same directory so a later rename over
    # `name` stays on the same filesystem
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                # original doesn't exist: an empty temp file is correct
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a stale temp file behind on failure
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1035 1035
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegate plain file operations straight to the underlying file
        for op in ('write', 'seek', 'tell', 'fileno'):
            setattr(self, op, getattr(self._fp, op))

    def close(self):
        # renaming only happens on an explicit, successful close
        if self._fp.closed:
            return
        self._fp.close()
        rename(self._tempname, localpath(self.__name))

    def discard(self):
        # throw away all writes: remove the temp file, never rename
        if self._fp.closed:
            return
        try:
            os.unlink(self._tempname)
        except OSError:
            pass
        self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1073 1073
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError, err:
        if err.errno == errno.EEXIST:
            # already exists: done (mode is deliberately not re-applied)
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # a parent directory is missing: create the ancestry, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1090 1090
1091 def ensuredirs(name, mode=None):
1092 """race-safe recursive directory creation"""
1091 def ensuredirs(name, mode=None, notindexed=False):
1092 """race-safe recursive directory creation
1093
1094 Newly created directories are marked as "not to be indexed by
1095 the content indexing service", if ``notindexed`` is specified
1096 for "write" mode access.
1097 """
1093 1098 if os.path.isdir(name):
1094 1099 return
1095 1100 parent = os.path.dirname(os.path.abspath(name))
1096 1101 if parent != name:
1097 ensuredirs(parent, mode)
1102 ensuredirs(parent, mode, notindexed)
1098 1103 try:
1099 os.mkdir(name)
1104 makedir(name, notindexed)
1100 1105 except OSError, err:
1101 1106 if err.errno == errno.EEXIST and os.path.isdir(name):
1102 1107 # someone else seems to have won a directory creation race
1103 1108 return
1104 1109 raise
1105 1110 if mode is not None:
1106 1111 os.chmod(name, mode)
1107 1112
def readfile(path):
    """Return the entire binary content of the file at *path*."""
    with open(path, 'rb') as fp:
        return fp.read()
1114 1119
def writefile(path, text):
    """Replace the contents of the file at *path* with *text* (binary)."""
    with open(path, 'wb') as fp:
        fp.write(text)
1121 1126
def appendfile(path, text):
    """Append *text* to the file at *path*, creating it if missing."""
    with open(path, 'ab') as fp:
        fp.write(text)
1128 1133
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # re-chunk anything bigger than 1MB into 256KB pieces
            for chunk in chunks:
                if len(chunk) <= 2**20:
                    yield chunk
                    continue
                pos = 0
                while pos < len(chunk):
                    yield chunk[pos:pos + 2 ** 18]
                    pos += 2 ** 18
        self.iter = splitbig(in_iter)
        self._queue = deque()

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        remaining = l
        pieces = []
        queue = self._queue
        while remaining is None or remaining > 0:
            if not queue:
                # refill the queue with roughly 256KB of data
                budget = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    budget -= len(chunk)
                    if budget <= 0:
                        break
                if not queue:
                    break

            chunk = queue.popleft()
            if remaining is None:
                pieces.append(chunk)
                continue
            remaining -= len(chunk)
            if remaining < 0:
                # took too much; push the excess back for the next read
                queue.appendleft(chunk[remaining:])
                pieces.append(chunk[:remaining])
            else:
                pieces.append(chunk)

        return ''.join(pieces)
1179 1184
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # a zero byte budget short-circuits without touching the file
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1200 1205
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # offset = difference between UTC and local wall-clock time, in seconds
    utcnaive = datetime.datetime.utcfromtimestamp(timestamp)
    localnaive = datetime.datetime.fromtimestamp(timestamp)
    delta = utcnaive - localnaive
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1213 1218
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if t < 0:
        # time.gmtime(lt) fails on Windows for lt < -43200
        t, tz = 0, 0
    if "%1" in format or "%2" in format or "%z" in format:
        # expand %z / %1%2 into a +HHMM / -HHMM style offset
        sign = "-" if tz > 0 else "+"
        minutes = abs(tz) // 60
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        t = time.gmtime(sys.maxint)
    return time.strftime(format, t)
1236 1241
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    # e.g. '1970-01-01'; falls back to the current date when date is
    # None/empty (datestr substitutes makedate() for a false date)
    return datestr(date, format='%Y-%m-%d')
1240 1245
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    def timezone(s):
        # recognize a trailing '+HHMM'/'-HHMM', 'GMT' or 'UTC' token
        tz = s.split()[-1]
        if len(tz) == 5 and tz[0] in "+-" and tz[1:].isdigit():
            sign = 1 if tz[0] == "+" else -1
            hours, minutes = int(tz[1:3]), int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz in ("GMT", "UTC"):
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset = timezone(string)
    date = string
    if offset is not None:
        # strip the recognized timezone token off the end
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        if any(("%" + p) in format for p in part):
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True
        else:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1281 1286
def parsedate(date, formats=None, bias={}):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # translated keywords for relative dates
    if date == _('now'):
        return makedate()
    if date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # "unixtime offset" fast path
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1358 1363
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp the spec can denote (unknown fields biased low)
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp the spec can denote (unknown fields biased high);
        # try month lengths from longest to shortest until one parses
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': everything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # 'DATE to DATE': inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1434 1439
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop everything from the '@' on
    user = user.partition('@')[0]
    # keep what follows a '<', if any
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    # then truncate at the first space and the first dot
    user = user.partition(' ')[0]
    user = user.partition('.')[0]
    return user
1450 1455
def emailuser(user):
    """Return the user portion of an email address."""
    # drop the domain, then any 'Real Name <' prefix
    user = user.partition('@')[0]
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    return user
1460 1465
def email(author):
    '''get email of author.'''
    # take the text between '<' and '>'; with no brackets, the whole
    # string is treated as the address
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        return author[start:]
    return author[start:end]
1467 1472
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # delegates to encoding.trim with an '...' ellipsis marker --
    # presumably column- (not byte-) aware for multibyte encodings;
    # see encoding.trim for the exact behavior
    return encoding.trim(text, maxlength, ellipsis='...')
1471 1476
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def render(count):
        # walk the table rows (multiplier, divisor, format) in order and
        # use the first threshold reached; the last row is the fallback
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        return unittable[-1][2] % count

    return render
1482 1487
# render a byte count using the largest unit that keeps the number
# readable; rows are (multiplier, divisor, format) tried in order by
# unitcountfn, with plain bytes as the fallback
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1495 1500
def uirepr(s):
    """repr() variant that collapses doubled backslashes (Windows paths)."""
    rendered = repr(s)
    # Avoid double backslash in Windows path repr()
    return rendered.replace('\\\\', '\\')
1499 1504
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def __init__(self, **kwargs):
            textwrap.TextWrapper.__init__(self, **kwargs)

            # for compatibility between 2.4 and 2.6
            if getattr(self, 'drop_whitespace', None) is None:
                self.drop_whitespace = kwargs.get('drop_whitespace', True)

        def _cutdown(self, ucstr, space_left):
            # split ucstr so the head occupies at most space_left columns
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the class itself so subsequent calls skip rebuilding it
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1610 1615
def wrap(line, width, initindent='', hangindent=''):
    """Wrap ``line`` to ``width`` display columns.

    ``initindent`` prefixes the first output line, ``hangindent`` the
    following ones. Input and output are byte strings in the local
    encoding (decoded/encoded via encoding.encoding).
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
1623 1628
def iterlines(iterator):
    """Yield each text line found in an iterable of multi-line chunks."""
    for block in iterator:
        for line in block.splitlines():
            yield line
1628 1633
def expandpath(path):
    """Expand environment variables, then '~' constructs, in *path*."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
1631 1636
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        # frozen build: the running executable itself is hg
        return [sys.executable]
    return gethgcmd()
1642 1647
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child; os.wait() returns a (pid, status) tuple
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD does not exist on all platforms (e.g. Windows)
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        # NOTE(review): terminated holds (pid, status) tuples, so
        # 'pid in terminated' compares an int against tuples and looks
        # always-False; liveness is effectively decided by testpid() --
        # confirm intent before relying on the membership test
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            # restore the previous SIGCHLD disposition
            signal.signal(signal.SIGCHLD, prevhandler)
1677 1682
try:
    # probe for the builtins; rebinding them locally keeps lookups in
    # this module fast and raises NameError on interpreters that lack
    # them, triggering the pure-python fallbacks below
    any, all = any, all
except NameError:
    def any(iterable):
        # True if at least one element is truthy
        for i in iterable:
            if i:
                return True
        return False

    def all(iterable):
        # True only if every element is truthy (vacuously True when empty)
        for i in iterable:
            if not i:
                return False
        return True
1692 1697
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        # let a doubled prefix stand for a literal prefix character
        patterns += '|' + prefix
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))
    # group() includes the prefix character; strip it before lookup
    return matcher.sub(lambda m: fn(mapping[m.group()[1:]]), s)
1717 1722
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: treat it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
1734 1739
# canonical spellings of boolean configuration values
_booleans = dict([(k, True) for k in ('1', 'yes', 'true', 'on', 'always')] +
                 [(k, False) for k in ('0', 'no', 'false', 'off', 'never')])

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
1745 1750
1746 1751 _hexdig = '0123456789ABCDEFabcdef'
1747 1752 _hextochr = dict((a + b, chr(int(a + b, 16)))
1748 1753 for a in _hexdig for b in _hexdig)
1749 1754
1750 1755 def _urlunquote(s):
1751 1756 """Decode HTTP/HTML % encoding.
1752 1757
1753 1758 >>> _urlunquote('abc%20def')
1754 1759 'abc def'
1755 1760 """
1756 1761 res = s.split('%')
1757 1762 # fastpath
1758 1763 if len(res) == 1:
1759 1764 return s
1760 1765 s = res[0]
1761 1766 for item in res[1:]:
1762 1767 try:
1763 1768 s += _hextochr[item[:2]] + item[2:]
1764 1769 except KeyError:
1765 1770 s += '%' + item
1766 1771 except UnicodeDecodeError:
1767 1772 s += unichr(int(item[:2], 16)) + item[2:]
1768 1773 return s
1769 1774
1770 1775 class url(object):
1771 1776 r"""Reliable URL parser.
1772 1777
1773 1778 This parses URLs and provides attributes for the following
1774 1779 components:
1775 1780
1776 1781 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1777 1782
1778 1783 Missing components are set to None. The only exception is
1779 1784 fragment, which is set to '' if present but empty.
1780 1785
1781 1786 If parsefragment is False, fragment is included in query. If
1782 1787 parsequery is False, query is included in path. If both are
1783 1788 False, both fragment and query are included in path.
1784 1789
1785 1790 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1786 1791
1787 1792 Note that for backward compatibility reasons, bundle URLs do not
1788 1793 take host names. That means 'bundle://../' has a path of '../'.
1789 1794
1790 1795 Examples:
1791 1796
1792 1797 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1793 1798 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1794 1799 >>> url('ssh://[::1]:2200//home/joe/repo')
1795 1800 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1796 1801 >>> url('file:///home/joe/repo')
1797 1802 <url scheme: 'file', path: '/home/joe/repo'>
1798 1803 >>> url('file:///c:/temp/foo/')
1799 1804 <url scheme: 'file', path: 'c:/temp/foo/'>
1800 1805 >>> url('bundle:foo')
1801 1806 <url scheme: 'bundle', path: 'foo'>
1802 1807 >>> url('bundle://../foo')
1803 1808 <url scheme: 'bundle', path: '../foo'>
1804 1809 >>> url(r'c:\foo\bar')
1805 1810 <url path: 'c:\\foo\\bar'>
1806 1811 >>> url(r'\\blah\blah\blah')
1807 1812 <url path: '\\\\blah\\blah\\blah'>
1808 1813 >>> url(r'\\blah\blah\blah#baz')
1809 1814 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1810 1815 >>> url(r'file:///C:\users\me')
1811 1816 <url scheme: 'file', path: 'C:\\users\\me'>
1812 1817
1813 1818 Authentication credentials:
1814 1819
1815 1820 >>> url('ssh://joe:xyz@x/repo')
1816 1821 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1817 1822 >>> url('ssh://joe@x/repo')
1818 1823 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1819 1824
1820 1825 Query strings and fragments:
1821 1826
1822 1827 >>> url('http://host/a?b#c')
1823 1828 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1824 1829 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1825 1830 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1826 1831 """
1827 1832
1828 1833 _safechars = "!~*'()+"
1829 1834 _safepchars = "/!~*'()+:\\"
1830 1835 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1831 1836
    def __init__(self, path, parsequery=True, parsefragment=True):
        """Parse *path* into URL components stored as attributes.

        ``parsequery``/``parsefragment`` control whether ``?`` and ``#``
        are treated as component separators or left in the path.
        Windows drive-letter paths, UNC paths and ``bundle:`` paths are
        handled specially and never split into host/port/etc.
        """
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        # _localpath stays True for plain filesystem paths (no scheme)
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        # a leading "<scheme>:" makes this a real (non-local) URL
        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped; everything else is stored
        # unquoted (see the TODO in __str__ about the path)
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))
1925 1930
1926 1931 def __repr__(self):
1927 1932 attrs = []
1928 1933 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
1929 1934 'query', 'fragment'):
1930 1935 v = getattr(self, a)
1931 1936 if v is not None:
1932 1937 attrs.append('%s: %r' % (a, v))
1933 1938 return '<url %s>' % ', '.join(attrs)
1934 1939
    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        # local paths (and bundle: paths) round-trip with at most the
        # "bundle:" prefix and fragment re-attached
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        # reassemble a full URL: scheme, authority, path, query, fragment
        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            # "//" is emitted even without an authority for absolute
            # paths, e.g. file:///tmp/foo (see doctests above)
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 hosts are left unquoted
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s
2011 2016
2012 2017 def authinfo(self):
2013 2018 user, passwd = self.user, self.passwd
2014 2019 try:
2015 2020 self.user, self.passwd = None, None
2016 2021 s = str(self)
2017 2022 finally:
2018 2023 self.user, self.passwd = user, passwd
2019 2024 if not self.user:
2020 2025 return (s, None)
2021 2026 # authinfo[1] is passed to urllib2 password manager, and its
2022 2027 # URIs must not contain credentials. The host is passed in the
2023 2028 # URIs list because Python < 2.4.3 uses only that to search for
2024 2029 # a password.
2025 2030 return (s, (None, (s, self.host),
2026 2031 self.user, self.passwd or ''))
2027 2032
2028 2033 def isabs(self):
2029 2034 if self.scheme and self.scheme != 'file':
2030 2035 return True # remote URL
2031 2036 if hasdriveletter(self.path):
2032 2037 return True # absolute for our purposes - can't be joined()
2033 2038 if self.path.startswith(r'\\'):
2034 2039 return True # Windows UNC path
2035 2040 if self.path.startswith('/'):
2036 2041 return True # POSIX-style
2037 2042 return False
2038 2043
2039 2044 def localpath(self):
2040 2045 if self.scheme == 'file' or self.scheme == 'bundle':
2041 2046 path = self.path or '/'
2042 2047 # For Windows, we need to promote hosts containing drive
2043 2048 # letters to paths with drive letters.
2044 2049 if hasdriveletter(self._hostport):
2045 2050 path = self._hostport + '/' + self.path
2046 2051 elif (self.host is not None and self.path
2047 2052 and not hasdriveletter(path)):
2048 2053 path = '/' + path
2049 2054 return path
2050 2055 return self._origpath
2051 2056
2052 2057 def islocal(self):
2053 2058 '''whether localpath will return something that posixfile can open'''
2054 2059 return (not self.scheme or self.scheme == 'file'
2055 2060 or self.scheme == 'bundle')
2056 2061
def hasscheme(path):
    '''Return True if *path* parses with a URL scheme component.'''
    u = url(path)
    return bool(u.scheme)
2059 2064
def hasdriveletter(path):
    '''Return True if *path* starts with a Windows drive letter ("c:...").

    Fix: the original returned its (falsy) argument itself for empty or
    None input instead of a boolean; normalize to bool.  All callers use
    the result in boolean context, so this is backward-compatible.
    '''
    return bool(path) and path[1:2] == ':' and path[0:1].isalpha()
2062 2067
def urllocalpath(path):
    '''Return the local path of *path*, leaving '?' and '#' unparsed.'''
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2065 2070
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2072 2077
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2078 2083
def isatty(fd):
    '''Return fd.isatty(); False for objects without an isatty method.'''
    result = False
    try:
        result = fd.isatty()
    except AttributeError:
        pass
    return result
2084 2089
# Format an elapsed time in seconds as a short human-readable string
# (used by timed() below).  Built via unitcountfn, defined elsewhere in
# this module; each entry is presumably (threshold, scale, format) --
# TODO confirm against unitcountfn's definition.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2100 2105
# current indentation depth of nested @timed calls (shared mutable cell)
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        indent = 2
        started = time.time()
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            duration = time.time() - started
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(duration)))
    return wrapper
2127 2132
# recognized size suffixes, checked in order (longer suffixes listed
# after shorter ones that they would otherwise shadow)
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2149 2154
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behaviour. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source-name, callable) pairs
        self._hooks = []

    def add(self, source, hook):
        '''Register *hook* under the ordering key *source*.'''
        self._hooks.append((source, hook))

    def __call__(self, *args):
        '''Invoke every registered hook with *args*; return all results.'''
        self._hooks.sort(key=lambda pair: pair[0])
        return [fn(*args) for _source, fn in self._hooks]
2167 2172
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and do intentionally not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg)
    frames = traceback.extract_stack()[:-skip - 1]
    entries = [('%s:%s' % (fname, lineno), funcname)
               for fname, lineno, funcname, _text in frames]
    if entries:
        # pad location column to the widest entry so names line up
        width = max(len(entry[0]) for entry in entries)
        for location, funcname in entries:
            f.write(' %-*s in %s\n' % (width, location, funcname))
    f.flush()

# convenient shortcut
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now