##// END OF EJS Templates
transaction: really disable hardlink backups (issue4546)
Matt Harbison -
r24164:07a92bbd 3.3.2 stable
parent child Browse files
Show More
@@ -1,2231 +1,2231 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 import i18n
17 17 _ = i18n._
18 18 import error, osutil, encoding
19 19 import errno, shutil, sys, tempfile, traceback
20 20 import re as remod
21 21 import os, time, datetime, calendar, textwrap, signal, collections
22 22 import imp, socket, urllib, struct
23 23 import gc
24 24
# Select the platform-specific implementation module.  Every name
# re-exported below resolves to either the windows or the posix module,
# giving the rest of the codebase one platform-neutral namespace.
if os.name == 'nt':
    import windows as platform
else:
    import posix as platform

# Platform API re-exports (see the windows/posix modules for semantics).
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the C implementation from osutil when it provides one
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
79 79
80 80 # Python compatibility
81 81
# Sentinel for "attribute missing"; unlike None it can never collide with a
# real attribute value supplied by user code.
_notset = object()

def safehasattr(thing, attr):
    """Return True if `thing` has attribute `attr`.

    Uses getattr() with a sentinel default instead of hasattr(), which in
    Python 2 swallows *all* exceptions raised by property code, not just
    AttributeError.
    """
    return getattr(thing, attr, _notset) is not _notset
86 86
def sha1(s=''):
    '''
    Low-overhead wrapper around Python's SHA support

    >>> f = _fastsha1
    >>> a = sha1()
    >>> a = f()
    >>> a.hexdigest()
    'da39a3ee5e6b4b0d3255bfef95601890afd80709'
    '''

    # delegates to _fastsha1, which rebinds both module-level names to the
    # real hash constructor on first use (see below)
    return _fastsha1(s)
99 99
def _fastsha1(s=''):
    # This function will import sha1 from hashlib or sha (whichever is
    # available) and overwrite itself with it on the first call.
    # Subsequent calls will go directly to the imported function.
    if sys.version_info >= (2, 5):
        from hashlib import sha1 as _sha1
    else:
        # Python 2.4 has no hashlib; fall back to the deprecated sha module
        from sha import sha as _sha1
    global _fastsha1, sha1
    # self-replacement: later calls bypass this wrapper entirely
    _fastsha1 = sha1 = _sha1
    return _sha1(s)
111 111
def md5(s=''):
    # same self-replacement trick as _fastsha1: after the first call the
    # module-level name 'md5' is the real hash constructor
    try:
        from hashlib import md5 as _md5
    except ImportError:
        # Python 2.4 fallback
        from md5 import md5 as _md5
    global md5
    md5 = _md5
    return _md5(s)
120 120
# registry of digest constructors, keyed by digest name
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha1', 'md5']

try:
    import hashlib
    DIGESTS.update({
        'sha512': hashlib.sha512,
    })
    DIGESTS_BY_STRENGTH.insert(0, 'sha512')
except ImportError:
    # Python 2.4: no hashlib, so no sha512 support
    pass

# sanity check: every name in the preference list must be registered
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
139 139
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """feed data to every configured digest"""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """return the hex digest accumulated so far for digest type `key`"""
        if key not in DIGESTS:
            # use `key`, not `k`: the old code referenced the stale loop
            # variable from __init__, raising NameError (or naming the
            # wrong digest) instead of the intended Abort message
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
186 186
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # feed everything that passes through us into the digester and
        # keep a running byte count for validate()
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """raise Abort unless the observed size and all digests match"""
        expected, got = self._size, self._got
        if expected != got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (expected, got))
        for name, wanted in self._digests.items():
            actual = self._digester[name]
            if wanted != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, wanted, actual))
218 218
# compatibility shim: Python 3 removed the builtin buffer(); provide an
# equivalent returning a suffix view of a sliceable object
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            # memoryview avoids copying the underlying bytes
            return memoryview(sliceable)[offset:]

import subprocess
# closing inherited file descriptors in children is only done on POSIX
closefds = os.name == 'posix'
231 231
def unpacker(fmt):
    """create a struct unpacker for the specified format"""
    compiled = getattr(struct, 'Struct', None)
    if compiled is not None:
        # 2.5+: precompile the format once and hand back its bound unpack
        return compiled(fmt).unpack
    # 2.4: no struct.Struct; fall back to a per-call unpack closure
    def _unpack(buf):
        return struct.unpack(fmt, buf)
    return _unpack
240 240
def popen2(cmd, env=None, newlines=False):
    """Run cmd through a shell; return its (stdin, stdout) pipes.

    stderr is not captured and goes to the parent's stderr.
    """
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout
251 251
def popen3(cmd, env=None, newlines=False):
    # convenience wrapper: like popen4 but drops the Popen object itself
    stdin, stdout, stderr, p = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
255 255
def popen4(cmd, env=None, newlines=False):
    """Run cmd through a shell; return (stdin, stdout, stderr, Popen).

    bufsize=-1 for the same performance reason documented in popen2.
    """
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout, p.stderr, p
264 264
def version():
    """Return version information if available."""
    try:
        import __version__
        return __version__.version
    except ImportError:
        # running from a source tree without a generated __version__ module
        return 'unknown'
272 272
# used by parsedate
# strptime() format strings tried in order against user-supplied dates
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# additionally accepted where an imprecise date is meaningful
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
307 307
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # NOTE: func.func_code is the Python 2 spelling (func.__code__ in py3);
    # the cache is unbounded and keyed on positional arguments only
    if func.func_code.co_argcount == 0:
        # niladic function: memoize the single result in a one-slot list
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.func_code.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
333 333
try:
    collections.deque.remove
    deque = collections.deque
except AttributeError:
    # python 2.4 lacks deque.remove
    class deque(collections.deque):
        # O(n) removal of the first element equal to val.
        # NOTE(review): unlike collections.deque.remove in 2.5+, this shim
        # silently does nothing when val is absent instead of raising
        # ValueError -- confirm callers never rely on the exception.
        def remove(self, val):
            for i, v in enumerate(self):
                if v == val:
                    del self[i]
                    break
345 345
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-setting an existing key moves it
    to the end.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # re-insertion moves the key to the end
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        for k in src:
            self[k] = src[k]
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # return the popped value like dict.pop does (the previous code
        # computed it and then silently discarded it, always returning None)
        val = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned
            pass
        return val
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
388 388
class lrucachedict(object):
    '''cache most recent gets from or sets to this dictionary'''
    def __init__(self, maxsize):
        self._cache = {}
        self._maxsize = maxsize
        self._order = deque()

    def __getitem__(self, key):
        # a hit moves the key to the most-recently-used end of the queue
        value = self._cache[key]
        self._order.remove(key)
        self._order.append(key)
        return value

    def __setitem__(self, key, value):
        if key in self._cache:
            # overwrite: just refresh the key's recency below
            self._order.remove(key)
        elif len(self._cache) >= self._maxsize:
            # new key and cache full: evict the least recently used entry
            del self._cache[self._order.popleft()]
        self._cache[key] = value
        self._order.append(key)

    def __contains__(self, key):
        return key in self._cache

    def clear(self):
        self._cache.clear()
        self._order = deque()
417 417
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    # bounded memoizer: at most ~20 entries are kept, evicting the least
    # recently used.  NOTE: func.func_code is the Python 2 spelling.
    cache = {}
    order = deque()
    if func.func_code.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                # cache hit: refresh recency
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
444 444
class propertycache(object):
    """Descriptor computing an attribute value once per instance.

    The computed result is stored in obj.__dict__ under the same name, so
    every later access bypasses this (non-data) descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
457 457
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # stderr is inherited from the parent, so perr is always None here
    pout, perr = p.communicate(s)
    return pout
464 464
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write the input to a temp file the command can read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # pre-create the output file; the command overwrites it
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS an odd status value means success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        # best-effort removal of both temporary files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
501 501
# maps a filter-spec prefix to the function implementing that strategy
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
506 506
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            # strip the strategy prefix before handing over the command
            return fn(s, cmd[len(name):].lstrip())
    # no explicit strategy prefix: default to a pipe
    return pipefilter(s, cmd)
513 513
def binary(s):
    """return true if a string is binary data"""
    # a NUL byte anywhere in a non-empty string marks it as binary
    if not s:
        return False
    return '\0' in s
517 517
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)) for x > 0; 0 for x == 0
        h = 0
        while x > 1:
            x >>= 1
            h += 1
        return h

    pieces, size = [], 0
    for chunk in source:
        pieces.append(chunk)
        size += len(chunk)
        if size < min:
            continue
        if min < max:
            # grow the threshold: at least double it, and never fall
            # behind the size we just emitted, capped at max
            min <<= 1
            nmin = 1 << log2(size)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pieces)
        pieces, size = [], 0
    if pieces:
        # flush whatever remains, even if below the threshold
        yield ''.join(pieces)
548 548
# re-export for convenience; the canonical definition lives in error.py
Abort = error.Abort

def always(fn):
    # constant predicate: accepts any input
    return True

def never(fn):
    # constant predicate: rejects any input
    return False
556 556
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # restore collection only if it was on when we started
            if wasenabled:
                gc.enable()
    return wrapper
578 578
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        # nothing to be relative to: just convert n2 to local form
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives (Windows): no relative path exists
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # strip the common leading components of both paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb from n1 up to the common ancestor, then descend to n2
    return os.sep.join((['..'] * len(a)) + b) or '.'
604 604
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    # each probe corresponds to an artifact of one freezing tool
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze
614 614
# the location of data files matching the source code
if mainfrozen():
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# let the i18n machinery locate its message catalogs
i18n.setdatapath(datapath)
623 623
# lazily-resolved path of the 'hg' executable; see hgexecutable()
_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            # frozen binary: the interpreter *is* hg
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # we were started from the 'hg' script itself
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
644 644
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # consumed by hgexecutable() and exported as $HG by system()
    global _hgexecutable
    _hgexecutable = path
649 649
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        # flush our own buffered output before the child writes to the tty
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        # let child processes locate the hg executable via $HG
        env['HG'] = hgexecutable()
        if out is None or out == sys.__stdout__:
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # forward the child's combined stdout/stderr to `out` line
            # by line
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
        if sys.platform == 'OpenVMS' and rc & 1:
            # on OpenVMS an odd status value means success
            rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
706 706
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback depth of 1 means the TypeError came from the call
            # itself (mismatched signature), not from inside func's body
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
718 718
719 719 def copyfile(src, dest, hardlink=False):
720 720 "copy a file, preserving mode and atime/mtime"
721 721 if os.path.lexists(dest):
722 722 unlink(dest)
723 723 # hardlinks are problematic on CIFS, quietly ignore this flag
724 724 # until we find a way to work around it cleanly (issue4546)
725 if False or hardlink:
725 if False and hardlink:
726 726 try:
727 727 oslink(src, dest)
728 728 return
729 729 except (IOError, OSError):
730 730 pass # fall back to normal copy
731 731 if os.path.islink(src):
732 732 os.symlink(os.readlink(src), dest)
733 733 else:
734 734 try:
735 735 shutil.copyfile(src, dest)
736 736 shutil.copymode(src, dest)
737 737 except shutil.Error, inst:
738 738 raise Abort(str(inst))
739 739
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible

    Returns (hardlink, num): whether hardlinking was still in effect at the
    end, and how many files were copied/linked.
    """

    if hardlink is None:
        # default: only attempt hardlinks when src and dst share a device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    num = 0
    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            # propagate the (possibly downgraded) hardlink flag
            hardlink, n = copyfiles(srcname, dstname, hardlink)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed once: stop trying for the rest of the tree
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1

    return hardlink, num
767 767
# filename base names that are reserved on Windows regardless of extension
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
# characters that may never appear in a Windows filename
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component individually
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                # control characters are forbidden
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # the reserved-name check applies to the part before the first dot
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # NOTE(review): "n not in '..'" is a *substring* test, which happens
        # to exempt exactly the special components '.' and '..' -- confirm
        # this is intentional before simplifying to a tuple membership test.
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
818 818
# on Windows, filenames must satisfy the Windows rules above; other
# platforms delegate to their own checker
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
823 823
def makelock(info, pathname):
    """Store `info` in a lock file at `pathname`.

    Uses a symlink whose target is `info` when os.symlink exists; otherwise
    falls back to an exclusively-created regular file containing `info`.
    """
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            # lock already held: propagate to the caller
            raise
        # any other OSError: fall through to the regular-file fallback
    except AttributeError: # no symlink in os
        pass

    # O_EXCL guarantees we are the creator of the lock file
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
836 836
def readlock(pathname):
    """Return the info stored in the lock file at `pathname`.

    Mirrors makelock(): reads the symlink target when the lock is a
    symlink, otherwise reads the regular file's contents.
    """
    try:
        return os.readlink(pathname)
    except OSError, why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported --
        # fall through to the regular-file format
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
849 849
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no file descriptor available: fall back to stat-by-name
        return os.stat(fp.name)
    return os.fstat(fd)
856 856
857 857 # File system features
858 858
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    # build a case-folded variant of the final component
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
    if b == b2:
        # the component has no letters to fold: nothing we can test
        return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.stat(p2)
        if s2 == s1:
            # folded name stats identically: treated as the same file
            return False
        return True
    except OSError:
        # the folded name doesn't exist: case is significant
        return True
881 881
# _re2 tri-state flag: None = re2 importable but not yet probed,
# False = unavailable (or probe failed), True = usable.
# The probe itself happens in _re._checkre2 below.
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
887 887
class _re(object):
    """Facade over the re module that transparently uses the optional re2
    bindings when they are importable and working."""

    def _checkre2(self):
        # probe re2 once and record the verdict in the module-level _re2
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline in the pattern, not as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 doesn't support: fall back
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton: use util.re.compile() rather than remod.compile()
re = _re()
932 932
# per-directory cache of {normcased name: on-disk name} mappings
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased name -> actual on-disk spelling for one directory
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string; the result must be assigned back.
    # The old code discarded it (a no-op), leaving a bare '\' inside the
    # character classes below on Windows, where os.sep is '\\'.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
975 975
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        try:
            oslink(f1, f2)
        except OSError:
            # hardlinks are unsupported here
            return False

        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    finally:
        if fd is not None:
            fd.close()
        # best-effort cleanup of both temporary files
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1009 1009
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    return os.altsep and path.endswith(os.altsep)
1013 1013
def splitpath(path):
    '''Split path on os.sep, deliberately ignoring os.altsep.

    This is simply an alternative spelling of path.split(os.sep); run
    os.path.normpath() on the argument first if it may contain
    redundant separators.'''
    return path.split(os.sep)
1021 1021
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1036 1036
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        # caller will overwrite the contents anyway; skip the copy
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                # source does not exist: the empty temp file is a
                # faithful copy
                return temp
            if not getattr(inst, 'filename', None):
                # make sure the propagated error names the file involved
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # never leave a partially written temp file behind
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1075 1075
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        # 'w' mode means the old contents will be thrown away, so the
        # copy in mktempcopy can be skipped (emptyok)
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """commit: flush the temp file and rename it over the original"""
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        """abort: remove the temp file, leaving the original untouched"""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        # an unclosed atomictempfile discards its writes by design
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1113 1113
1114 1114 def makedirs(name, mode=None, notindexed=False):
1115 1115 """recursive directory creation with parent mode inheritance"""
1116 1116 try:
1117 1117 makedir(name, notindexed)
1118 1118 except OSError, err:
1119 1119 if err.errno == errno.EEXIST:
1120 1120 return
1121 1121 if err.errno != errno.ENOENT or not name:
1122 1122 raise
1123 1123 parent = os.path.dirname(os.path.abspath(name))
1124 1124 if parent == name:
1125 1125 raise
1126 1126 makedirs(parent, mode, notindexed)
1127 1127 makedir(name, notindexed)
1128 1128 if mode is not None:
1129 1129 os.chmod(name, mode)
1130 1130
1131 1131 def ensuredirs(name, mode=None, notindexed=False):
1132 1132 """race-safe recursive directory creation
1133 1133
1134 1134 Newly created directories are marked as "not to be indexed by
1135 1135 the content indexing service", if ``notindexed`` is specified
1136 1136 for "write" mode access.
1137 1137 """
1138 1138 if os.path.isdir(name):
1139 1139 return
1140 1140 parent = os.path.dirname(os.path.abspath(name))
1141 1141 if parent != name:
1142 1142 ensuredirs(parent, mode, notindexed)
1143 1143 try:
1144 1144 makedir(name, notindexed)
1145 1145 except OSError, err:
1146 1146 if err.errno == errno.EEXIST and os.path.isdir(name):
1147 1147 # someone else seems to have won a directory creation race
1148 1148 return
1149 1149 raise
1150 1150 if mode is not None:
1151 1151 os.chmod(name, mode)
1152 1152
def readfile(path):
    """Return the entire binary contents of the file at path."""
    f = open(path, 'rb')
    try:
        return f.read()
    finally:
        f.close()
1159 1159
def writefile(path, text):
    """Replace the contents of the file at path with text (binary)."""
    f = open(path, 'wb')
    try:
        f.write(text)
    finally:
        f.close()
1166 1166
def appendfile(path, text):
    """Append text (binary) to the file at path, creating it if needed."""
    f = open(path, 'ab')
    try:
        f.write(text)
    finally:
        f.close()
1173 1173
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            # re-chunk anything larger than 1MB into 256kB pieces so a
            # single huge input chunk cannot dominate the queue below
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        # chunks already pulled from self.iter but not yet consumed
        self._queue = deque()

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        left = l
        buf = []
        queue = self._queue
        while left is None or left > 0:
            # refill the queue with up to ~256kB of chunks at a time
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # source exhausted; return whatever was collected
                    break

            chunk = queue.popleft()
            if left is not None:
                left -= len(chunk)
            if left is not None and left < 0:
                # chunk is bigger than requested: keep the surplus tail
                # queued for the next read()
                queue.appendleft(chunk[left:])
                buf.append(chunk[:left])
            else:
                buf.append(chunk)

        return ''.join(buf)
1224 1224
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    remaining = limit
    while True:
        if remaining is None:
            want = size
        else:
            want = min(remaining, size)
        # a zero-byte request reads nothing and ends the iteration
        data = want and f.read(want)
        if not data:
            break
        if remaining:
            remaining -= len(data)
        yield data
1245 1245
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # the local zone's distance from UTC, in seconds (positive = behind)
    localdelta = (datetime.datetime.utcfromtimestamp(timestamp) -
                  datetime.datetime.fromtimestamp(timestamp))
    offset = localdelta.days * 86400 + localdelta.seconds
    return timestamp, offset
1258 1258
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """Render a (unixtime, offset) tuple as a localized time string.

    unixtime is seconds since the epoch; offset is the zone's distance
    from UTC in seconds.  The placeholders %1/%2 (or %z) in format
    expand to the signed HH/MM zone suffix."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        if tz > 0:
            sign = "-"
        else:
            sign = "+"
        minutes = abs(tz) // 60
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    try:
        timetuple = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        timetuple = time.gmtime(sys.maxint)
    return time.strftime(format, timetuple)
1281 1281
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, format='%Y-%m-%d')
1285 1285
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    defaults maps date-part keys ("d", "mb", "yY", "HI", "M", "S") to
    (bias, now) string pairs used to fill in parts missing from format.
    """
    # NOTE(review): defaults is indexed with string keys below, so the
    # mutable [] default only works when format names every part;
    # callers normally pass a dict (see parsedate).
    def timezone(string):
        # recognize a trailing "+HHMM"/"-HHMM", "GMT" or "UTC" marker;
        # returns the offset in seconds (positive = behind UTC), or None
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset is not None:
        # strip the zone marker, which strptime cannot parse
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1326 1326
def parsedate(date, formats=None, bias={}):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    bias maps date-part keys to preferred fill-in values for parts the
    input leaves unspecified (see matchdate's lower/upper helpers).

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    # bias is only ever read (bias.get), so the shared {} default is safe
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    if date == _('now'):
        return makedate()
    if date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    # fast path: a plain "unixtime offset" pair of integers
    try:
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each format in turn until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1403 1403
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp the spec can mean: unspecified month/day
        # are biased to 1 (January / first of the month)
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp the spec can mean: unspecified parts are
        # biased to the end of the period (December, 23:59:59, and the
        # largest valid day of the month, probing 31/30/29 then 28)
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # inclusive range between two dates
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match anywhere within the period it denotes
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1479 1479
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the domain, then anything before a '<', then truncate at the
    # first space or dot
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    for sep in (' ', '.'):
        pos = user.find(sep)
        if pos >= 0:
            user = user[:pos]
    return user
1495 1495
def emailuser(user):
    """Return the user portion of an email address."""
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    return user
1505 1505
def email(author):
    '''get email of author.

    Extracts the text between '<' and '>'; without angle brackets the
    whole string is returned.'''
    end = author.find('>')
    if end < 0:
        end = None
    start = author.find('<') + 1
    return author[start:end]
1512 1512
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display.

    Delegates to encoding.trim with '...' as the ellipsis marker.
    """
    return encoding.trim(text, maxlength, ellipsis='...')
1516 1516
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity

    unittable is a sequence of (factor, unit, fmt) triples, ordered from
    largest to smallest unit; the first triple whose threshold
    (unit * factor) the count reaches is used for formatting.'''

    def render(count):
        for factor, unit, fmt in unittable:
            if count >= unit * factor:
                return fmt % (count / float(unit))
        # nothing matched: format the raw count with the smallest unit
        return unittable[-1][2] % count

    return render
1527 1527
# render a byte count using the coarsest unit that keeps three
# significant digits, e.g. "1.23 MB" (built with unitcountfn above)
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1540 1540
def uirepr(s):
    """Return repr(s) with doubled backslashes collapsed to one.

    Avoids the double backslash in Windows path repr().
    """
    rep = repr(s)
    return rep.replace('\\\\', '\\')
1544 1544
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def __init__(self, **kwargs):
            textwrap.TextWrapper.__init__(self, **kwargs)

            # for compatibility between 2.4 and 2.6
            if getattr(self, 'drop_whitespace', None) is None:
                self.drop_whitespace = kwargs.get('drop_whitespace', True)

        def _cutdown(self, ucstr, space_left):
            # split ucstr at the longest prefix fitting in space_left
            # display columns, returning (head, tail)
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # rebind the module-level name to the class itself so later calls
    # construct instances directly without re-executing the class body
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1655 1655
def wrap(line, width, initindent='', hangindent=''):
    """Wrap line to at most width display columns (multi-byte aware).

    initindent prefixes the first line, hangindent all following lines.
    Input and output are byte strings in the local encoding.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    # wrap in unicode so column widths are computed per character
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
1668 1668
def iterlines(iterator):
    """Yield every line contained in the chunks produced by iterator."""
    for block in iterator:
        for line in block.splitlines():
            yield line
1673 1673
def expandpath(path):
    """Expand environment variables, then ~user constructs, in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
1676 1676
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        # frozen build: sys.executable is presumably the packaged hg
        # binary itself
        return [sys.executable]
    return gethgcmd()
1687 1687
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child; os.wait() returns a (pid, status) tuple
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): 'pid in terminated' compares an int against
            # the (pid, status) tuples added above, so it never matches;
            # child death is effectively detected by testpid() alone --
            # TODO confirm before relying on the 'terminated' set.
            # condfn() is re-checked after detecting death to close the
            # race between the last poll and the process exiting.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            # restore the previous SIGCHLD disposition
            signal.signal(signal.SIGCHLD, prevhandler)
1722 1722
# Compatibility shim: any()/all() are builtins from Python 2.5 on; fall
# back to pure-python definitions when they are missing (NameError).
try:
    any, all = any, all
except NameError:
    def any(iterable):
        for i in iterable:
            if i:
                return True
        return False

    def all(iterable):
        for i in iterable:
            if not i:
                return False
        return True
1737 1737
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    if not fn:
        fn = lambda s: s
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        # allow the (unescaped) prefix character itself as a key, so a
        # doubled prefix renders a literal prefix
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))
    def replace(match):
        # strip the prefix character, look up the key, post-process
        return fn(mapping[match.group()[1:]])
    return matcher.sub(replace, s)
1762 1762
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: fall through to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
1779 1779
# canonical boolean spellings accepted by parsebool(), mapped to values
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}
1783 1783
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    normalized = s.lower()
    return _booleans.get(normalized)
1790 1790
_hexdig = '0123456789ABCDEFabcdef'
# map each two-digit hex string (in every case combination) to its
# character; used to decode %XX escapes in _urlunquote below
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)
1794 1794
def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    pieces = s.split('%')
    # fastpath: no escapes present at all
    if len(pieces) == 1:
        return s
    out = pieces[0]
    for piece in pieces[1:]:
        try:
            out += _hextochr[piece[:2]] + piece[2:]
        except KeyError:
            # not a valid two-digit escape: keep the '%' literally
            out += '%' + piece
        except UnicodeDecodeError:
            out += unichr(int(piece[:2], 16)) + piece[2:]
    return out
1814 1814
1815 1815 class url(object):
1816 1816 r"""Reliable URL parser.
1817 1817
1818 1818 This parses URLs and provides attributes for the following
1819 1819 components:
1820 1820
1821 1821 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1822 1822
1823 1823 Missing components are set to None. The only exception is
1824 1824 fragment, which is set to '' if present but empty.
1825 1825
1826 1826 If parsefragment is False, fragment is included in query. If
1827 1827 parsequery is False, query is included in path. If both are
1828 1828 False, both fragment and query are included in path.
1829 1829
1830 1830 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1831 1831
1832 1832 Note that for backward compatibility reasons, bundle URLs do not
1833 1833 take host names. That means 'bundle://../' has a path of '../'.
1834 1834
1835 1835 Examples:
1836 1836
1837 1837 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1838 1838 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1839 1839 >>> url('ssh://[::1]:2200//home/joe/repo')
1840 1840 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1841 1841 >>> url('file:///home/joe/repo')
1842 1842 <url scheme: 'file', path: '/home/joe/repo'>
1843 1843 >>> url('file:///c:/temp/foo/')
1844 1844 <url scheme: 'file', path: 'c:/temp/foo/'>
1845 1845 >>> url('bundle:foo')
1846 1846 <url scheme: 'bundle', path: 'foo'>
1847 1847 >>> url('bundle://../foo')
1848 1848 <url scheme: 'bundle', path: '../foo'>
1849 1849 >>> url(r'c:\foo\bar')
1850 1850 <url path: 'c:\\foo\\bar'>
1851 1851 >>> url(r'\\blah\blah\blah')
1852 1852 <url path: '\\\\blah\\blah\\blah'>
1853 1853 >>> url(r'\\blah\blah\blah#baz')
1854 1854 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1855 1855 >>> url(r'file:///C:\users\me')
1856 1856 <url scheme: 'file', path: 'C:\\users\\me'>
1857 1857
1858 1858 Authentication credentials:
1859 1859
1860 1860 >>> url('ssh://joe:xyz@x/repo')
1861 1861 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1862 1862 >>> url('ssh://joe@x/repo')
1863 1863 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1864 1864
1865 1865 Query strings and fragments:
1866 1866
1867 1867 >>> url('http://host/a?b#c')
1868 1868 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1869 1869 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1870 1870 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1871 1871 """
1872 1872
1873 1873 _safechars = "!~*'()+"
1874 1874 _safepchars = "/!~*'()+:\\"
1875 1875 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1876 1876
    def __init__(self, path, parsequery=True, parsefragment=True):
        """Parse *path* into URL components.

        The parsed pieces are stored on self as scheme, user, passwd,
        host, port, path, query and fragment (each may be None).
        parsequery/parsefragment control whether '?'/'#' are treated as
        separators.  Raises Abort for file:// URLs whose host is not a
        local address.  See the class docstring for examples.
        """
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                # no scheme was recognized: the whole string is a plain path
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                # stored components are kept unquoted (except query above)
                setattr(self, a, _urlunquote(v))
1970 1970
1971 1971 def __repr__(self):
1972 1972 attrs = []
1973 1973 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
1974 1974 'query', 'fragment'):
1975 1975 v = getattr(self, a)
1976 1976 if v is not None:
1977 1977 attrs.append('%s: %r' % (a, v))
1978 1978 return '<url %s>' % ', '.join(attrs)
1979 1979
1980 1980 def __str__(self):
1981 1981 r"""Join the URL's components back into a URL string.
1982 1982
1983 1983 Examples:
1984 1984
1985 1985 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
1986 1986 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
1987 1987 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
1988 1988 'http://user:pw@host:80/?foo=bar&baz=42'
1989 1989 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
1990 1990 'http://user:pw@host:80/?foo=bar%3dbaz'
1991 1991 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
1992 1992 'ssh://user:pw@[::1]:2200//home/joe#'
1993 1993 >>> str(url('http://localhost:80//'))
1994 1994 'http://localhost:80//'
1995 1995 >>> str(url('http://localhost:80/'))
1996 1996 'http://localhost:80/'
1997 1997 >>> str(url('http://localhost:80'))
1998 1998 'http://localhost:80/'
1999 1999 >>> str(url('bundle:foo'))
2000 2000 'bundle:foo'
2001 2001 >>> str(url('bundle://../foo'))
2002 2002 'bundle:../foo'
2003 2003 >>> str(url('path'))
2004 2004 'path'
2005 2005 >>> str(url('file:///tmp/foo/bar'))
2006 2006 'file:///tmp/foo/bar'
2007 2007 >>> str(url('file:///c:/tmp/foo/bar'))
2008 2008 'file:///c:/tmp/foo/bar'
2009 2009 >>> print url(r'bundle:foo\bar')
2010 2010 bundle:foo\bar
2011 2011 >>> print url(r'file:///D:\data\hg')
2012 2012 file:///D:\data\hg
2013 2013 """
2014 2014 if self._localpath:
2015 2015 s = self.path
2016 2016 if self.scheme == 'bundle':
2017 2017 s = 'bundle:' + s
2018 2018 if self.fragment:
2019 2019 s += '#' + self.fragment
2020 2020 return s
2021 2021
2022 2022 s = self.scheme + ':'
2023 2023 if self.user or self.passwd or self.host:
2024 2024 s += '//'
2025 2025 elif self.scheme and (not self.path or self.path.startswith('/')
2026 2026 or hasdriveletter(self.path)):
2027 2027 s += '//'
2028 2028 if hasdriveletter(self.path):
2029 2029 s += '/'
2030 2030 if self.user:
2031 2031 s += urllib.quote(self.user, safe=self._safechars)
2032 2032 if self.passwd:
2033 2033 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
2034 2034 if self.user or self.passwd:
2035 2035 s += '@'
2036 2036 if self.host:
2037 2037 if not (self.host.startswith('[') and self.host.endswith(']')):
2038 2038 s += urllib.quote(self.host)
2039 2039 else:
2040 2040 s += self.host
2041 2041 if self.port:
2042 2042 s += ':' + urllib.quote(self.port)
2043 2043 if self.host:
2044 2044 s += '/'
2045 2045 if self.path:
2046 2046 # TODO: similar to the query string, we should not unescape the
2047 2047 # path when we store it, the path might contain '%2f' = '/',
2048 2048 # which we should *not* escape.
2049 2049 s += urllib.quote(self.path, safe=self._safepchars)
2050 2050 if self.query:
2051 2051 # we store the query in escaped form.
2052 2052 s += '?' + self.query
2053 2053 if self.fragment is not None:
2054 2054 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
2055 2055 return s
2056 2056
2057 2057 def authinfo(self):
2058 2058 user, passwd = self.user, self.passwd
2059 2059 try:
2060 2060 self.user, self.passwd = None, None
2061 2061 s = str(self)
2062 2062 finally:
2063 2063 self.user, self.passwd = user, passwd
2064 2064 if not self.user:
2065 2065 return (s, None)
2066 2066 # authinfo[1] is passed to urllib2 password manager, and its
2067 2067 # URIs must not contain credentials. The host is passed in the
2068 2068 # URIs list because Python < 2.4.3 uses only that to search for
2069 2069 # a password.
2070 2070 return (s, (None, (s, self.host),
2071 2071 self.user, self.passwd or ''))
2072 2072
2073 2073 def isabs(self):
2074 2074 if self.scheme and self.scheme != 'file':
2075 2075 return True # remote URL
2076 2076 if hasdriveletter(self.path):
2077 2077 return True # absolute for our purposes - can't be joined()
2078 2078 if self.path.startswith(r'\\'):
2079 2079 return True # Windows UNC path
2080 2080 if self.path.startswith('/'):
2081 2081 return True # POSIX-style
2082 2082 return False
2083 2083
2084 2084 def localpath(self):
2085 2085 if self.scheme == 'file' or self.scheme == 'bundle':
2086 2086 path = self.path or '/'
2087 2087 # For Windows, we need to promote hosts containing drive
2088 2088 # letters to paths with drive letters.
2089 2089 if hasdriveletter(self._hostport):
2090 2090 path = self._hostport + '/' + self.path
2091 2091 elif (self.host is not None and self.path
2092 2092 and not hasdriveletter(path)):
2093 2093 path = '/' + path
2094 2094 return path
2095 2095 return self._origpath
2096 2096
2097 2097 def islocal(self):
2098 2098 '''whether localpath will return something that posixfile can open'''
2099 2099 return (not self.scheme or self.scheme == 'file'
2100 2100 or self.scheme == 'bundle')
2101 2101
def hasscheme(path):
    '''Return True if path parses as a URL with a scheme prefix.'''
    u = url(path)
    return bool(u.scheme)
2104 2104
def hasdriveletter(path):
    '''Return True if path begins with a Windows drive letter ("c:...").

    A falsy path (None or '') is returned unchanged, preserving the
    short-circuit behaviour callers rely on.'''
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2107 2107
def urllocalpath(path):
    '''Return the local filesystem path for path, parsed as a URL with
    query-string and fragment handling disabled.'''
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2110 2110
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # mask rather than drop, so the URL shape stays recognizable
        parsed.passwd = '***'
    return str(parsed)
2117 2117
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = parsed.passwd = None
    return str(parsed)
2123 2123
def isatty(fd):
    '''Return fd.isatty(); False for objects without an isatty method.'''
    try:
        result = fd.isatty()
    except AttributeError:
        return False
    return result
2129 2129
# Render a duration (in seconds) human-readably: unitcountfn picks the
# first (factor, divisor, format) triple whose scaled value reaches the
# given factor, stepping down through s / ms / us / ns.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

# current indentation depth (in spaces) for nested @timed reports;
# a one-element list so the closure in timed() can mutate it
_timenesting = [0]
2147 2147
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        begin = time.time()
        # indent nested @timed reports by two extra spaces per level
        _timenesting[0] += 2
        try:
            return func(*args, **kwargs)
        finally:
            duration = time.time() - begin
            _timenesting[0] -= 2
            sys.stderr.write('%s%s: %s\n'
                             % (' ' * _timenesting[0], func.__name__,
                                timecount(duration)))
    return wrapper
2172 2172
2173 2173 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2174 2174 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2175 2175
2176 2176 def sizetoint(s):
2177 2177 '''Convert a space specifier to a byte count.
2178 2178
2179 2179 >>> sizetoint('30')
2180 2180 30
2181 2181 >>> sizetoint('2.2kb')
2182 2182 2252
2183 2183 >>> sizetoint('6M')
2184 2184 6291456
2185 2185 '''
2186 2186 t = s.strip().lower()
2187 2187 try:
2188 2188 for k, u in _sizeunits:
2189 2189 if t.endswith(k):
2190 2190 return int(float(t[:-len(k)]) * u)
2191 2191 return int(t)
2192 2192 except ValueError:
2193 2193 raise error.ParseError(_("couldn't parse size: %s") % s)
2194 2194
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behaviour. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        '''Register hook under the given source name.'''
        self._hooks.append((source, hook))

    def __call__(self, *args):
        '''Invoke every registered hook with *args, ordered by source
        name, and return the list of their results.'''
        ordered = sorted(self._hooks, key=lambda entry: entry[0])
        return [hook(*args) for source, hook in ordered]
2212 2212
2213 2213 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2214 2214 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2215 2215 Skips the 'skip' last entries. By default it will flush stdout first.
2216 2216 It can be used everywhere and do intentionally not require an ui object.
2217 2217 Not be used in production code but very convenient while developing.
2218 2218 '''
2219 2219 if otherf:
2220 2220 otherf.flush()
2221 2221 f.write('%s at:\n' % msg)
2222 2222 entries = [('%s:%s' % (fn, ln), func)
2223 2223 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2224 2224 if entries:
2225 2225 fnmax = max(len(entry[0]) for entry in entries)
2226 2226 for fnln, func in entries:
2227 2227 f.write(' %-*s in %s\n' % (fnmax, fnln, func))
2228 2228 f.flush()
2229 2229
2230 2230 # convenient shortcut
2231 2231 dst = debugstacktrace
@@ -1,367 +1,367 b''
1 1 #require hardlink
2 2
3 3 $ cat > nlinks.py <<EOF
4 4 > import sys
5 5 > from mercurial import util
6 6 > for f in sorted(sys.stdin.readlines()):
7 7 > f = f[:-1]
8 8 > print util.nlinks(f), f
9 9 > EOF
10 10
11 11 $ nlinksdir()
12 12 > {
13 13 > find $1 -type f | python $TESTTMP/nlinks.py
14 14 > }
15 15
16 16 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
17 17
18 18 $ cat > linkcp.py <<EOF
19 19 > from mercurial import util
20 20 > import sys
21 21 > util.copyfiles(sys.argv[1], sys.argv[2], hardlink=True)
22 22 > EOF
23 23
24 24 $ linkcp()
25 25 > {
26 26 > python $TESTTMP/linkcp.py $1 $2
27 27 > }
28 28
29 29 Prepare repo r1:
30 30
31 31 $ hg init r1
32 32 $ cd r1
33 33
34 34 $ echo c1 > f1
35 35 $ hg add f1
36 36 $ hg ci -m0
37 37
38 38 $ mkdir d1
39 39 $ cd d1
40 40 $ echo c2 > f2
41 41 $ hg add f2
42 42 $ hg ci -m1
43 43 $ cd ../..
44 44
45 45 $ nlinksdir r1/.hg/store
46 46 1 r1/.hg/store/00changelog.i
47 47 1 r1/.hg/store/00manifest.i
48 48 1 r1/.hg/store/data/d1/f2.i
49 49 1 r1/.hg/store/data/f1.i
50 50 1 r1/.hg/store/fncache
51 51 1 r1/.hg/store/phaseroots
52 52 1 r1/.hg/store/undo
53 53 1 r1/.hg/store/undo.backup.fncache
54 54 1 r1/.hg/store/undo.backupfiles
55 55 1 r1/.hg/store/undo.phaseroots
56 56
57 57
58 58 Create hardlinked clone r2:
59 59
60 60 $ hg clone -U --debug r1 r2
61 61 linked 7 files
62 62
63 63 Create non-hardlinked clone r3:
64 64
65 65 $ hg clone --pull r1 r3
66 66 requesting all changes
67 67 adding changesets
68 68 adding manifests
69 69 adding file changes
70 70 added 2 changesets with 2 changes to 2 files
71 71 updating to branch default
72 72 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
73 73
74 74
75 75 Repos r1 and r2 should now contain hardlinked files:
76 76
77 77 $ nlinksdir r1/.hg/store
78 78 2 r1/.hg/store/00changelog.i
79 79 2 r1/.hg/store/00manifest.i
80 80 2 r1/.hg/store/data/d1/f2.i
81 81 2 r1/.hg/store/data/f1.i
82 82 2 r1/.hg/store/fncache
83 83 1 r1/.hg/store/phaseroots
84 84 1 r1/.hg/store/undo
85 85 1 r1/.hg/store/undo.backup.fncache
86 86 1 r1/.hg/store/undo.backupfiles
87 87 1 r1/.hg/store/undo.phaseroots
88 88
89 89 $ nlinksdir r2/.hg/store
90 90 2 r2/.hg/store/00changelog.i
91 91 2 r2/.hg/store/00manifest.i
92 92 2 r2/.hg/store/data/d1/f2.i
93 93 2 r2/.hg/store/data/f1.i
94 94 2 r2/.hg/store/fncache
95 95
96 96 Repo r3 should not be hardlinked:
97 97
98 98 $ nlinksdir r3/.hg/store
99 99 1 r3/.hg/store/00changelog.i
100 100 1 r3/.hg/store/00manifest.i
101 101 1 r3/.hg/store/data/d1/f2.i
102 102 1 r3/.hg/store/data/f1.i
103 103 1 r3/.hg/store/fncache
104 104 1 r3/.hg/store/phaseroots
105 105 1 r3/.hg/store/undo
106 106 1 r3/.hg/store/undo.backupfiles
107 107 1 r3/.hg/store/undo.phaseroots
108 108
109 109
110 110 Create a non-inlined filelog in r3:
111 111
112 112 $ cd r3/d1
113 113 >>> f = open('data1', 'wb')
114 114 >>> for x in range(10000):
115 115 ... f.write("%s\n" % str(x))
116 116 >>> f.close()
117 117 $ for j in 0 1 2 3 4 5 6 7 8 9; do
118 118 > cat data1 >> f2
119 119 > hg commit -m$j
120 120 > done
121 121 $ cd ../..
122 122
123 123 $ nlinksdir r3/.hg/store
124 124 1 r3/.hg/store/00changelog.i
125 125 1 r3/.hg/store/00manifest.i
126 126 1 r3/.hg/store/data/d1/f2.d
127 127 1 r3/.hg/store/data/d1/f2.i
128 128 1 r3/.hg/store/data/f1.i
129 129 1 r3/.hg/store/fncache
130 130 1 r3/.hg/store/phaseroots
131 131 1 r3/.hg/store/undo
132 132 1 r3/.hg/store/undo.backup.fncache
133 133 1 r3/.hg/store/undo.backup.phaseroots
134 134 1 r3/.hg/store/undo.backupfiles
135 135 1 r3/.hg/store/undo.phaseroots
136 136
137 137 Push to repo r1 should break up most hardlinks in r2:
138 138
139 139 $ hg -R r2 verify
140 140 checking changesets
141 141 checking manifests
142 142 crosschecking files in changesets and manifests
143 143 checking files
144 144 2 files, 2 changesets, 2 total revisions
145 145
146 146 $ cd r3
147 147 $ hg push
148 148 pushing to $TESTTMP/r1 (glob)
149 149 searching for changes
150 150 adding changesets
151 151 adding manifests
152 152 adding file changes
153 153 added 10 changesets with 10 changes to 1 files
154 154
155 155 $ cd ..
156 156
157 157 $ nlinksdir r2/.hg/store
158 158 1 r2/.hg/store/00changelog.i
159 159 1 r2/.hg/store/00manifest.i
160 160 1 r2/.hg/store/data/d1/f2.i
161 161 2 r2/.hg/store/data/f1.i
162 2 r2/.hg/store/fncache
162 1 r2/.hg/store/fncache
163 163
164 164 $ hg -R r2 verify
165 165 checking changesets
166 166 checking manifests
167 167 crosschecking files in changesets and manifests
168 168 checking files
169 169 2 files, 2 changesets, 2 total revisions
170 170
171 171
172 172 $ cd r1
173 173 $ hg up
174 174 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
175 175
176 176 Committing a change to f1 in r1 must break up hardlink f1.i in r2:
177 177
178 178 $ echo c1c1 >> f1
179 179 $ hg ci -m00
180 180 $ cd ..
181 181
182 182 $ nlinksdir r2/.hg/store
183 183 1 r2/.hg/store/00changelog.i
184 184 1 r2/.hg/store/00manifest.i
185 185 1 r2/.hg/store/data/d1/f2.i
186 186 1 r2/.hg/store/data/f1.i
187 2 r2/.hg/store/fncache
187 1 r2/.hg/store/fncache
188 188
189 189
190 190 $ cd r3
191 191 $ hg tip --template '{rev}:{node|short}\n'
192 192 11:a6451b6bc41f
193 193 $ echo bla > f1
194 194 $ hg ci -m1
195 195 $ cd ..
196 196
197 197 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
198 198
199 199 $ linkcp r3 r4
200 200
201 201 r4 has hardlinks in the working dir (not just inside .hg):
202 202
203 203 $ nlinksdir r4
204 204 2 r4/.hg/00changelog.i
205 205 2 r4/.hg/branch
206 206 2 r4/.hg/cache/branch2-served
207 207 2 r4/.hg/cache/rbc-names-v1
208 208 2 r4/.hg/cache/rbc-revs-v1
209 209 2 r4/.hg/dirstate
210 210 2 r4/.hg/hgrc
211 211 2 r4/.hg/last-message.txt
212 212 2 r4/.hg/requires
213 213 2 r4/.hg/store/00changelog.i
214 214 2 r4/.hg/store/00manifest.i
215 215 2 r4/.hg/store/data/d1/f2.d
216 216 2 r4/.hg/store/data/d1/f2.i
217 217 2 r4/.hg/store/data/f1.i
218 218 2 r4/.hg/store/fncache
219 219 2 r4/.hg/store/phaseroots
220 220 2 r4/.hg/store/undo
221 221 2 r4/.hg/store/undo.backup.fncache
222 222 2 r4/.hg/store/undo.backup.phaseroots
223 223 2 r4/.hg/store/undo.backupfiles
224 224 2 r4/.hg/store/undo.phaseroots
225 225 2 r4/.hg/undo.bookmarks
226 226 2 r4/.hg/undo.branch
227 227 2 r4/.hg/undo.desc
228 228 2 r4/.hg/undo.dirstate
229 229 2 r4/d1/data1
230 230 2 r4/d1/f2
231 231 2 r4/f1
232 232
233 233 Update back to revision 11 in r4 should break hardlink of file f1:
234 234
235 235 $ hg -R r4 up 11
236 236 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
237 237
238 238 $ nlinksdir r4
239 239 2 r4/.hg/00changelog.i
240 240 1 r4/.hg/branch
241 241 2 r4/.hg/cache/branch2-served
242 242 2 r4/.hg/cache/rbc-names-v1
243 243 2 r4/.hg/cache/rbc-revs-v1
244 244 1 r4/.hg/dirstate
245 245 2 r4/.hg/hgrc
246 246 2 r4/.hg/last-message.txt
247 247 2 r4/.hg/requires
248 248 2 r4/.hg/store/00changelog.i
249 249 2 r4/.hg/store/00manifest.i
250 250 2 r4/.hg/store/data/d1/f2.d
251 251 2 r4/.hg/store/data/d1/f2.i
252 252 2 r4/.hg/store/data/f1.i
253 253 2 r4/.hg/store/fncache
254 254 2 r4/.hg/store/phaseroots
255 255 2 r4/.hg/store/undo
256 256 2 r4/.hg/store/undo.backup.fncache
257 257 2 r4/.hg/store/undo.backup.phaseroots
258 258 2 r4/.hg/store/undo.backupfiles
259 259 2 r4/.hg/store/undo.phaseroots
260 260 2 r4/.hg/undo.bookmarks
261 261 2 r4/.hg/undo.branch
262 262 2 r4/.hg/undo.desc
263 263 2 r4/.hg/undo.dirstate
264 264 2 r4/d1/data1
265 265 2 r4/d1/f2
266 266 1 r4/f1
267 267
268 268
269 269 Test hardlinking outside hg:
270 270
271 271 $ mkdir x
272 272 $ echo foo > x/a
273 273
274 274 $ linkcp x y
275 275 $ echo bar >> y/a
276 276
277 277 No diff if hardlink:
278 278
279 279 $ diff x/a y/a
280 280
281 281 Test mq hardlinking:
282 282
283 283 $ echo "[extensions]" >> $HGRCPATH
284 284 $ echo "mq=" >> $HGRCPATH
285 285
286 286 $ hg init a
287 287 $ cd a
288 288
289 289 $ hg qimport -n foo - << EOF
290 290 > # HG changeset patch
291 291 > # Date 1 0
292 292 > diff -r 2588a8b53d66 a
293 293 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
294 294 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
295 295 > @@ -0,0 +1,1 @@
296 296 > +a
297 297 > EOF
298 298 adding foo to series file
299 299
300 300 $ hg qpush
301 301 applying foo
302 302 now at: foo
303 303
304 304 $ cd ..
305 305 $ linkcp a b
306 306 $ cd b
307 307
308 308 $ hg qimport -n bar - << EOF
309 309 > # HG changeset patch
310 310 > # Date 2 0
311 311 > diff -r 2588a8b53d66 a
312 312 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
313 313 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
314 314 > @@ -0,0 +1,1 @@
315 315 > +b
316 316 > EOF
317 317 adding bar to series file
318 318
319 319 $ hg qpush
320 320 applying bar
321 321 now at: bar
322 322
323 323 $ cat .hg/patches/status
324 324 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
325 325 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
326 326
327 327 $ cat .hg/patches/series
328 328 foo
329 329 bar
330 330
331 331 $ cat ../a/.hg/patches/status
332 332 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
333 333
334 334 $ cat ../a/.hg/patches/series
335 335 foo
336 336
337 337 Test tags hardlinking:
338 338
339 339 $ hg qdel -r qbase:qtip
340 340 patch foo finalized without changeset message
341 341 patch bar finalized without changeset message
342 342
343 343 $ hg tag -l lfoo
344 344 $ hg tag foo
345 345
346 346 $ cd ..
347 347 $ linkcp b c
348 348 $ cd c
349 349
350 350 $ hg tag -l -r 0 lbar
351 351 $ hg tag -r 0 bar
352 352
353 353 $ cat .hgtags
354 354 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
355 355 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
356 356
357 357 $ cat .hg/localtags
358 358 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
359 359 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
360 360
361 361 $ cat ../b/.hgtags
362 362 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
363 363
364 364 $ cat ../b/.hg/localtags
365 365 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
366 366
367 367 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now