statichttprepo: use new functions for requirements validation...
Gregory Szorc
r39730:61929805 default
@@ -1,1791 +1,1772 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 short,
25 25 wdirid,
26 26 wdirrev,
27 27 )
28 28
29 29 from . import (
30 30 encoding,
31 31 error,
32 32 match as matchmod,
33 33 obsolete,
34 34 obsutil,
35 35 pathutil,
36 36 phases,
37 37 policy,
38 38 pycompat,
39 39 revsetlang,
40 40 similar,
41 41 url,
42 42 util,
43 43 vfs,
44 44 )
45 45
46 46 from .utils import (
47 47 procutil,
48 48 stringutil,
49 49 )
50 50
51 51 if pycompat.iswindows:
52 52 from . import scmwindows as scmplatform
53 53 else:
54 54 from . import scmposix as scmplatform
55 55
56 56 parsers = policy.importmod(r'parsers')
57 57
58 58 termsize = scmplatform.termsize
59 59
60 60 class status(tuple):
61 61 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
62 62 and 'ignored' properties are only relevant to the working copy.
63 63 '''
64 64
65 65 __slots__ = ()
66 66
67 67 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
68 68 clean):
69 69 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
70 70 ignored, clean))
71 71
72 72 @property
73 73 def modified(self):
74 74 '''files that have been modified'''
75 75 return self[0]
76 76
77 77 @property
78 78 def added(self):
79 79 '''files that have been added'''
80 80 return self[1]
81 81
82 82 @property
83 83 def removed(self):
84 84 '''files that have been removed'''
85 85 return self[2]
86 86
87 87 @property
88 88 def deleted(self):
89 89 '''files that are in the dirstate, but have been deleted from the
90 90 working copy (aka "missing")
91 91 '''
92 92 return self[3]
93 93
94 94 @property
95 95 def unknown(self):
96 96 '''files not in the dirstate that are not ignored'''
97 97 return self[4]
98 98
99 99 @property
100 100 def ignored(self):
101 101 '''files not in the dirstate that are ignored (by _dirignore())'''
102 102 return self[5]
103 103
104 104 @property
105 105 def clean(self):
106 106 '''files that have not been modified'''
107 107 return self[6]
108 108
109 109 def __repr__(self, *args, **kwargs):
110 110 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
111 111 r'unknown=%s, ignored=%s, clean=%s>') %
112 112 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
113 113
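For illustration, a minimal sketch of how this tuple subclass behaves (the file names are made up; each field is a list of repo-relative paths):

    st = status(['modified.txt'], [], [], [], ['unknown.txt'], [], ['clean.txt'])
    assert st.modified == ['modified.txt']  # named property access...
    assert st[4] == ['unknown.txt']         # ...and positional indexing agree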
114 114 def itersubrepos(ctx1, ctx2):
115 115 """find subrepos in ctx1 or ctx2"""
116 116 # Create a (subpath, ctx) mapping where we prefer subpaths from
117 117 # ctx1. The subpaths from ctx2 are important when the .hgsub file
118 118 # has been modified (in ctx2) but not yet committed (in ctx1).
119 119 subpaths = dict.fromkeys(ctx2.substate, ctx2)
120 120 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
121 121
122 122 missing = set()
123 123
124 124 for subpath in ctx2.substate:
125 125 if subpath not in ctx1.substate:
126 126 del subpaths[subpath]
127 127 missing.add(subpath)
128 128
129 129 for subpath, ctx in sorted(subpaths.iteritems()):
130 130 yield subpath, ctx.sub(subpath)
131 131
132 132 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
133 133 # status and diff will have an accurate result when it does
134 134 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
135 135 # against itself.
136 136 for subpath in missing:
137 137 yield subpath, ctx2.nullsub(subpath, ctx1)
138 138
139 139 def nochangesfound(ui, repo, excluded=None):
140 140 '''Report no changes for push/pull, excluded is None or a list of
141 141 nodes excluded from the push/pull.
142 142 '''
143 143 secretlist = []
144 144 if excluded:
145 145 for n in excluded:
146 146 ctx = repo[n]
147 147 if ctx.phase() >= phases.secret and not ctx.extinct():
148 148 secretlist.append(n)
149 149
150 150 if secretlist:
151 151 ui.status(_("no changes found (ignored %d secret changesets)\n")
152 152 % len(secretlist))
153 153 else:
154 154 ui.status(_("no changes found\n"))
155 155
156 156 def callcatch(ui, func):
157 157 """call func() with global exception handling
158 158
159 159 return func() if no exception happens. otherwise do some error handling
160 160 and return an exit code accordingly. does not handle all exceptions.
161 161 """
162 162 try:
163 163 try:
164 164 return func()
165 165 except: # re-raises
166 166 ui.traceback()
167 167 raise
168 168 # Global exception handling, alphabetically
169 169 # Mercurial-specific first, followed by built-in and library exceptions
170 170 except error.LockHeld as inst:
171 171 if inst.errno == errno.ETIMEDOUT:
172 172 reason = _('timed out waiting for lock held by %r') % inst.locker
173 173 else:
174 174 reason = _('lock held by %r') % inst.locker
175 175 ui.error(_("abort: %s: %s\n") % (
176 176 inst.desc or stringutil.forcebytestr(inst.filename), reason))
177 177 if not inst.locker:
178 178 ui.error(_("(lock might be very busy)\n"))
179 179 except error.LockUnavailable as inst:
180 180 ui.error(_("abort: could not lock %s: %s\n") %
181 181 (inst.desc or stringutil.forcebytestr(inst.filename),
182 182 encoding.strtolocal(inst.strerror)))
183 183 except error.OutOfBandError as inst:
184 184 if inst.args:
185 185 msg = _("abort: remote error:\n")
186 186 else:
187 187 msg = _("abort: remote error\n")
188 188 ui.error(msg)
189 189 if inst.args:
190 190 ui.error(''.join(inst.args))
191 191 if inst.hint:
192 192 ui.error('(%s)\n' % inst.hint)
193 193 except error.RepoError as inst:
194 194 ui.error(_("abort: %s!\n") % inst)
195 195 if inst.hint:
196 196 ui.error(_("(%s)\n") % inst.hint)
197 197 except error.ResponseError as inst:
198 198 ui.error(_("abort: %s") % inst.args[0])
199 199 msg = inst.args[1]
200 200 if isinstance(msg, type(u'')):
201 201 msg = pycompat.sysbytes(msg)
202 202 if not isinstance(msg, bytes):
203 203 ui.error(" %r\n" % (msg,))
204 204 elif not msg:
205 205 ui.error(_(" empty string\n"))
206 206 else:
207 207 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
208 208 except error.CensoredNodeError as inst:
209 209 ui.error(_("abort: file censored %s!\n") % inst)
210 210 except error.RevlogError as inst:
211 211 ui.error(_("abort: %s!\n") % inst)
212 212 except error.InterventionRequired as inst:
213 213 ui.error("%s\n" % inst)
214 214 if inst.hint:
215 215 ui.error(_("(%s)\n") % inst.hint)
216 216 return 1
217 217 except error.WdirUnsupported:
218 218 ui.error(_("abort: working directory revision cannot be specified\n"))
219 219 except error.Abort as inst:
220 220 ui.error(_("abort: %s\n") % inst)
221 221 if inst.hint:
222 222 ui.error(_("(%s)\n") % inst.hint)
223 223 except ImportError as inst:
224 224 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
225 225 m = stringutil.forcebytestr(inst).split()[-1]
226 226 if m in "mpatch bdiff".split():
227 227 ui.error(_("(did you forget to compile extensions?)\n"))
228 228 elif m in "zlib".split():
229 229 ui.error(_("(is your Python install correct?)\n"))
230 230 except IOError as inst:
231 231 if util.safehasattr(inst, "code"):
232 232 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
233 233 elif util.safehasattr(inst, "reason"):
234 234 try: # usually it is in the form (errno, strerror)
235 235 reason = inst.reason.args[1]
236 236 except (AttributeError, IndexError):
237 237 # it might be anything, for example a string
238 238 reason = inst.reason
239 239 if isinstance(reason, pycompat.unicode):
240 240 # SSLError of Python 2.7.9 contains a unicode
241 241 reason = encoding.unitolocal(reason)
242 242 ui.error(_("abort: error: %s\n") % reason)
243 243 elif (util.safehasattr(inst, "args")
244 244 and inst.args and inst.args[0] == errno.EPIPE):
245 245 pass
246 246 elif getattr(inst, "strerror", None):
247 247 if getattr(inst, "filename", None):
248 248 ui.error(_("abort: %s: %s\n") % (
249 249 encoding.strtolocal(inst.strerror),
250 250 stringutil.forcebytestr(inst.filename)))
251 251 else:
252 252 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
253 253 else:
254 254 raise
255 255 except OSError as inst:
256 256 if getattr(inst, "filename", None) is not None:
257 257 ui.error(_("abort: %s: '%s'\n") % (
258 258 encoding.strtolocal(inst.strerror),
259 259 stringutil.forcebytestr(inst.filename)))
260 260 else:
261 261 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
262 262 except MemoryError:
263 263 ui.error(_("abort: out of memory\n"))
264 264 except SystemExit as inst:
265 265 # Commands shouldn't sys.exit directly, but give a return code.
266 266 # Just in case catch this and pass exit code to caller.
267 267 return inst.code
268 268 except socket.error as inst:
269 269 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
270 270
271 271 return -1
272 272
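As a usage sketch (the `main` callable and `ui` instance are assumed, not part of this module), a command runner wraps its entry point so known failures become exit codes rather than tracebacks:

    def main():
        # may raise error.Abort, error.LockHeld, IOError, ...
        return 0

    ret = callcatch(ui, main)  # 0 on success; 1, -1 or SystemExit's code on error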
273 273 def checknewlabel(repo, lbl, kind):
274 274 # Do not use the "kind" parameter in ui output.
275 275 # It makes strings difficult to translate.
276 276 if lbl in ['tip', '.', 'null']:
277 277 raise error.Abort(_("the name '%s' is reserved") % lbl)
278 278 for c in (':', '\0', '\n', '\r'):
279 279 if c in lbl:
280 280 raise error.Abort(
281 281 _("%r cannot be used in a name") % pycompat.bytestr(c))
282 282 try:
283 283 int(lbl)
284 284 raise error.Abort(_("cannot use an integer as a name"))
285 285 except ValueError:
286 286 pass
287 287 if lbl.strip() != lbl:
288 288 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
289 289
290 290 def checkfilename(f):
291 291 '''Check that the filename f is an acceptable filename for a tracked file'''
292 292 if '\r' in f or '\n' in f:
293 293 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
294 294 % pycompat.bytestr(f))
295 295
296 296 def checkportable(ui, f):
297 297 '''Check if filename f is portable and warn or abort depending on config'''
298 298 checkfilename(f)
299 299 abort, warn = checkportabilityalert(ui)
300 300 if abort or warn:
301 301 msg = util.checkwinfilename(f)
302 302 if msg:
303 303 msg = "%s: %s" % (msg, procutil.shellquote(f))
304 304 if abort:
305 305 raise error.Abort(msg)
306 306 ui.warn(_("warning: %s\n") % msg)
307 307
308 308 def checkportabilityalert(ui):
309 309 '''check if the user's config requests nothing, a warning, or abort for
310 310 non-portable filenames'''
311 311 val = ui.config('ui', 'portablefilenames')
312 312 lval = val.lower()
313 313 bval = stringutil.parsebool(val)
314 314 abort = pycompat.iswindows or lval == 'abort'
315 315 warn = bval or lval == 'warn'
316 316 if bval is None and not (warn or abort or lval == 'ignore'):
317 317 raise error.ConfigError(
318 318 _("ui.portablefilenames value is invalid ('%s')") % val)
319 319 return abort, warn
320 320
321 321 class casecollisionauditor(object):
322 322 def __init__(self, ui, abort, dirstate):
323 323 self._ui = ui
324 324 self._abort = abort
325 325 allfiles = '\0'.join(dirstate._map)
326 326 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
327 327 self._dirstate = dirstate
328 328 # The purpose of _newfiles is so that we don't complain about
329 329 # case collisions if someone were to call this object with the
330 330 # same filename twice.
331 331 self._newfiles = set()
332 332
333 333 def __call__(self, f):
334 334 if f in self._newfiles:
335 335 return
336 336 fl = encoding.lower(f)
337 337 if fl in self._loweredfiles and f not in self._dirstate:
338 338 msg = _('possible case-folding collision for %s') % f
339 339 if self._abort:
340 340 raise error.Abort(msg)
341 341 self._ui.warn(_("warning: %s\n") % msg)
342 342 self._loweredfiles.add(fl)
343 343 self._newfiles.add(f)
344 344
345 345 def filteredhash(repo, maxrev):
346 346 """build hash of filtered revisions in the current repoview.
347 347
348 348 Multiple caches perform up-to-date validation by checking that the
349 349 tiprev and tipnode stored in the cache file match the current repository.
350 350 However, this is not sufficient for validating repoviews because the set
351 351 of revisions in the view may change without the repository tiprev and
352 352 tipnode changing.
353 353
354 354 This function hashes all the revs filtered from the view and returns
355 355 that SHA-1 digest.
356 356 """
357 357 cl = repo.changelog
358 358 if not cl.filteredrevs:
359 359 return None
360 360 key = None
361 361 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
362 362 if revs:
363 363 s = hashlib.sha1()
364 364 for rev in revs:
365 365 s.update('%d;' % rev)
366 366 key = s.digest()
367 367 return key
368 368
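A cache consumer could, as a sketch, store this digest next to its tiprev/tipnode and treat any mismatch as an invalidation signal (`cachedhash` and `rebuild` are hypothetical):

    newhash = filteredhash(repo, maxrev=len(repo) - 1)
    if newhash != cachedhash:
        rebuild()  # the view changed even though tiprev/tipnode may not have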
369 369 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
370 370 '''yield every hg repository under path, always recursively.
371 371 The recurse flag will only control recursion into repo working dirs'''
372 372 def errhandler(err):
373 373 if err.filename == path:
374 374 raise err
375 375 samestat = getattr(os.path, 'samestat', None)
376 376 if followsym and samestat is not None:
377 377 def adddir(dirlst, dirname):
378 378 dirstat = os.stat(dirname)
379 379 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
380 380 if not match:
381 381 dirlst.append(dirstat)
382 382 return not match
383 383 else:
384 384 followsym = False
385 385
386 386 if (seen_dirs is None) and followsym:
387 387 seen_dirs = []
388 388 adddir(seen_dirs, path)
389 389 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
390 390 dirs.sort()
391 391 if '.hg' in dirs:
392 392 yield root # found a repository
393 393 qroot = os.path.join(root, '.hg', 'patches')
394 394 if os.path.isdir(os.path.join(qroot, '.hg')):
395 395 yield qroot # we have a patch queue repo here
396 396 if recurse:
397 397 # avoid recursing inside the .hg directory
398 398 dirs.remove('.hg')
399 399 else:
400 400 dirs[:] = [] # don't descend further
401 401 elif followsym:
402 402 newdirs = []
403 403 for d in dirs:
404 404 fname = os.path.join(root, d)
405 405 if adddir(seen_dirs, fname):
406 406 if os.path.islink(fname):
407 407 for hgname in walkrepos(fname, True, seen_dirs):
408 408 yield hgname
409 409 else:
410 410 newdirs.append(d)
411 411 dirs[:] = newdirs
412 412
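For example, enumerating every repository below a server root while following symlinks (a sketch; the path is illustrative):

    for root in walkrepos('/srv/hg', followsym=True, recurse=True):
        print(root)  # each yielded value is a directory containing .hg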
413 413 def binnode(ctx):
414 414 """Return binary node id for a given basectx"""
415 415 node = ctx.node()
416 416 if node is None:
417 417 return wdirid
418 418 return node
419 419
420 420 def intrev(ctx):
421 421 """Return integer for a given basectx that can be used in comparison or
422 422 arithmetic operation"""
423 423 rev = ctx.rev()
424 424 if rev is None:
425 425 return wdirrev
426 426 return rev
427 427
428 428 def formatchangeid(ctx):
429 429 """Format changectx as '{rev}:{node|formatnode}', which is the default
430 430 template provided by logcmdutil.changesettemplater"""
431 431 repo = ctx.repo()
432 432 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
433 433
434 434 def formatrevnode(ui, rev, node):
435 435 """Format given revision and node depending on the current verbosity"""
436 436 if ui.debugflag:
437 437 hexfunc = hex
438 438 else:
439 439 hexfunc = short
440 440 return '%d:%s' % (rev, hexfunc(node))
441 441
442 442 def resolvehexnodeidprefix(repo, prefix):
443 443 if (prefix.startswith('x') and
444 444 repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
445 445 prefix = prefix[1:]
446 446 try:
447 447 # Uses unfiltered repo because it's faster when prefix is ambiguous.
448 448 # This matches the shortesthexnodeidprefix() function below.
449 449 node = repo.unfiltered().changelog._partialmatch(prefix)
450 450 except error.AmbiguousPrefixLookupError:
451 451 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
452 452 if revset:
453 453 # Clear config to avoid infinite recursion
454 454 configoverrides = {('experimental',
455 455 'revisions.disambiguatewithin'): None}
456 456 with repo.ui.configoverride(configoverrides):
457 457 revs = repo.anyrevs([revset], user=True)
458 458 matches = []
459 459 for rev in revs:
460 460 node = repo.changelog.node(rev)
461 461 if hex(node).startswith(prefix):
462 462 matches.append(node)
463 463 if len(matches) == 1:
464 464 return matches[0]
465 465 raise
466 466 if node is None:
467 467 return
468 468 repo.changelog.rev(node) # make sure node isn't filtered
469 469 return node
470 470
471 471 def mayberevnum(repo, prefix):
472 472 """Checks if the given prefix may be mistaken for a revision number"""
473 473 try:
474 474 i = int(prefix)
475 475 # if we are a pure int, then starting with zero will not be
476 476 # confused as a rev; or, obviously, if the int is larger
477 477 # than the value of the tip rev
478 478 if prefix[0:1] == b'0' or i >= len(repo):
479 479 return False
480 480 return True
481 481 except ValueError:
482 482 return False
483 483
484 484 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
485 485 """Find the shortest unambiguous prefix that matches hexnode.
486 486
487 487 If "cache" is not None, it must be a dictionary that can be used for
488 488 caching between calls to this method.
489 489 """
490 490 # _partialmatch() of filtered changelog could take O(len(repo)) time,
491 491 # which would be unacceptably slow. so we look for hash collision in
492 492 # unfiltered space, which means some hashes may be slightly longer.
493 493
494 494 def disambiguate(prefix):
495 495 """Disambiguate against revnums."""
496 496 if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
497 497 if mayberevnum(repo, prefix):
498 498 return 'x' + prefix
499 499 else:
500 500 return prefix
501 501
502 502 hexnode = hex(node)
503 503 for length in range(len(prefix), len(hexnode) + 1):
504 504 prefix = hexnode[:length]
505 505 if not mayberevnum(repo, prefix):
506 506 return prefix
507 507
508 508 cl = repo.unfiltered().changelog
509 509 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
510 510 if revset:
511 511 revs = None
512 512 if cache is not None:
513 513 revs = cache.get('disambiguationrevset')
514 514 if revs is None:
515 515 revs = repo.anyrevs([revset], user=True)
516 516 if cache is not None:
517 517 cache['disambiguationrevset'] = revs
518 518 if cl.rev(node) in revs:
519 519 hexnode = hex(node)
520 520 nodetree = None
521 521 if cache is not None:
522 522 nodetree = cache.get('disambiguationnodetree')
523 523 if not nodetree:
524 524 try:
525 525 nodetree = parsers.nodetree(cl.index, len(revs))
526 526 except AttributeError:
527 527 # no native nodetree
528 528 pass
529 529 else:
530 530 for r in revs:
531 531 nodetree.insert(r)
532 532 if cache is not None:
533 533 cache['disambiguationnodetree'] = nodetree
534 534 if nodetree is not None:
535 535 length = max(nodetree.shortest(node), minlength)
536 536 prefix = hexnode[:length]
537 537 return disambiguate(prefix)
538 538 for length in range(minlength, len(hexnode) + 1):
539 539 matches = []
540 540 prefix = hexnode[:length]
541 541 for rev in revs:
542 542 otherhexnode = repo[rev].hex()
543 543 if prefix == otherhexnode[:length]:
544 544 matches.append(otherhexnode)
545 545 if len(matches) == 1:
546 546 return disambiguate(prefix)
547 547
548 548 try:
549 549 return disambiguate(cl.shortest(node, minlength))
550 550 except error.LookupError:
551 551 raise error.RepoLookupError()
552 552
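The two functions above are intended to round-trip; a sketch, assuming `repo`, a changelog `node`, and default configuration:

    prefix = shortesthexnodeidprefix(repo, node, minlength=4)
    assert resolvehexnodeidprefix(repo, prefix) == node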
553 553 def isrevsymbol(repo, symbol):
554 554 """Checks if a symbol exists in the repo.
555 555
556 556 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
557 557 symbol is an ambiguous nodeid prefix.
558 558 """
559 559 try:
560 560 revsymbol(repo, symbol)
561 561 return True
562 562 except error.RepoLookupError:
563 563 return False
564 564
565 565 def revsymbol(repo, symbol):
566 566 """Returns a context given a single revision symbol (as string).
567 567
568 568 This is similar to revsingle(), but accepts only a single revision symbol,
569 569 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
570 570 not "max(public())".
571 571 """
572 572 if not isinstance(symbol, bytes):
573 573 msg = ("symbol (%s of type %s) was not a string, did you mean "
574 574 "repo[symbol]?" % (symbol, type(symbol)))
575 575 raise error.ProgrammingError(msg)
576 576 try:
577 577 if symbol in ('.', 'tip', 'null'):
578 578 return repo[symbol]
579 579
580 580 try:
581 581 r = int(symbol)
582 582 if '%d' % r != symbol:
583 583 raise ValueError
584 584 l = len(repo.changelog)
585 585 if r < 0:
586 586 r += l
587 587 if r < 0 or r >= l and r != wdirrev:
588 588 raise ValueError
589 589 return repo[r]
590 590 except error.FilteredIndexError:
591 591 raise
592 592 except (ValueError, OverflowError, IndexError):
593 593 pass
594 594
595 595 if len(symbol) == 40:
596 596 try:
597 597 node = bin(symbol)
598 598 rev = repo.changelog.rev(node)
599 599 return repo[rev]
600 600 except error.FilteredLookupError:
601 601 raise
602 602 except (TypeError, LookupError):
603 603 pass
604 604
605 605 # look up bookmarks through the name interface
606 606 try:
607 607 node = repo.names.singlenode(repo, symbol)
608 608 rev = repo.changelog.rev(node)
609 609 return repo[rev]
610 610 except KeyError:
611 611 pass
612 612
613 613 node = resolvehexnodeidprefix(repo, symbol)
614 614 if node is not None:
615 615 rev = repo.changelog.rev(node)
616 616 return repo[rev]
617 617
618 618 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
619 619
620 620 except error.WdirUnsupported:
621 621 return repo[None]
622 622 except (error.FilteredIndexError, error.FilteredLookupError,
623 623 error.FilteredRepoLookupError):
624 624 raise _filterederror(repo, symbol)
625 625
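A sketch of accepted and rejected inputs (assumes an existing `repo`):

    ctx = revsymbol(repo, 'tip')  # symbols, revnums, hash prefixes, bookmarks
    # revsymbol(repo, 'max(public())') raises RepoLookupError (revsets rejected)
    # revsymbol(repo, u'tip') raises ProgrammingError (symbol must be bytes)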
626 626 def _filterederror(repo, changeid):
627 627 """build an exception to be raised about a filtered changeid
628 628
629 629 This is extracted in a function to help extensions (eg: evolve) to
630 630 experiment with various message variants."""
631 631 if repo.filtername.startswith('visible'):
632 632
633 633 # Check if the changeset is obsolete
634 634 unfilteredrepo = repo.unfiltered()
635 635 ctx = revsymbol(unfilteredrepo, changeid)
636 636
637 637 # If the changeset is obsolete, enrich the message with the reason
638 638 # that made this changeset not visible
639 639 if ctx.obsolete():
640 640 msg = obsutil._getfilteredreason(repo, changeid, ctx)
641 641 else:
642 642 msg = _("hidden revision '%s'") % changeid
643 643
644 644 hint = _('use --hidden to access hidden revisions')
645 645
646 646 return error.FilteredRepoLookupError(msg, hint=hint)
647 647 msg = _("filtered revision '%s' (not in '%s' subset)")
648 648 msg %= (changeid, repo.filtername)
649 649 return error.FilteredRepoLookupError(msg)
650 650
651 651 def revsingle(repo, revspec, default='.', localalias=None):
652 652 if not revspec and revspec != 0:
653 653 return repo[default]
654 654
655 655 l = revrange(repo, [revspec], localalias=localalias)
656 656 if not l:
657 657 raise error.Abort(_('empty revision set'))
658 658 return repo[l.last()]
659 659
660 660 def _pairspec(revspec):
661 661 tree = revsetlang.parse(revspec)
662 662 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
663 663
664 664 def revpair(repo, revs):
665 665 if not revs:
666 666 return repo['.'], repo[None]
667 667
668 668 l = revrange(repo, revs)
669 669
670 670 if not l:
671 671 first = second = None
672 672 elif l.isascending():
673 673 first = l.min()
674 674 second = l.max()
675 675 elif l.isdescending():
676 676 first = l.max()
677 677 second = l.min()
678 678 else:
679 679 first = l.first()
680 680 second = l.last()
681 681
682 682 if first is None:
683 683 raise error.Abort(_('empty revision range'))
684 684 if (first == second and len(revs) >= 2
685 685 and not all(revrange(repo, [r]) for r in revs)):
686 686 raise error.Abort(_('empty revision on one side of range'))
687 687
688 688 # if top-level is range expression, the result must always be a pair
689 689 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
690 690 return repo[first], repo[None]
691 691
692 692 return repo[first], repo[second]
693 693
694 694 def revrange(repo, specs, localalias=None):
695 695 """Execute 1 to many revsets and return the union.
696 696
697 697 This is the preferred mechanism for executing revsets using user-specified
698 698 config options, such as revset aliases.
699 699
700 700 The revsets specified by ``specs`` will be executed via a chained ``OR``
701 701 expression. If ``specs`` is empty, an empty result is returned.
702 702
703 703 ``specs`` can contain integers, in which case they are assumed to be
704 704 revision numbers.
705 705
706 706 It is assumed the revsets are already formatted. If you have arguments
707 707 that need to be expanded in the revset, call ``revsetlang.formatspec()``
708 708 and pass the result as an element of ``specs``.
709 709
710 710 Specifying a single revset is allowed.
711 711
712 712 Returns a ``revset.abstractsmartset`` which is a list-like interface over
713 713 integer revisions.
714 714 """
715 715 allspecs = []
716 716 for spec in specs:
717 717 if isinstance(spec, int):
718 718 spec = revsetlang.formatspec('rev(%d)', spec)
719 719 allspecs.append(spec)
720 720 return repo.anyrevs(allspecs, user=True, localalias=localalias)
721 721
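Following the docstring's advice, arguments are pre-expanded with revsetlang.formatspec() before being passed in; a sketch assuming an existing `repo` and an illustrative bookmark name:

    spec = revsetlang.formatspec('ancestors(%s)', 'release-1.0')
    revs = revrange(repo, [spec, 'draft()'])  # union of both revsets
    if revs:
        print(revs.min(), revs.max())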
722 722 def meaningfulparents(repo, ctx):
723 723 """Return list of meaningful (or all if debug) parentrevs for rev.
724 724
725 725 For merges (two non-nullrev revisions) both parents are meaningful.
726 726 Otherwise the first parent revision is considered meaningful if it
727 727 is not the preceding revision.
728 728 """
729 729 parents = ctx.parents()
730 730 if len(parents) > 1:
731 731 return parents
732 732 if repo.ui.debugflag:
733 733 return [parents[0], repo['null']]
734 734 if parents[0].rev() >= intrev(ctx) - 1:
735 735 return []
736 736 return parents
737 737
738 738 def expandpats(pats):
739 739 '''Expand bare globs when running on windows.
740 740 On posix we assume it has already been done by sh.'''
741 741 if not util.expandglobs:
742 742 return list(pats)
743 743 ret = []
744 744 for kindpat in pats:
745 745 kind, pat = matchmod._patsplit(kindpat, None)
746 746 if kind is None:
747 747 try:
748 748 globbed = glob.glob(pat)
749 749 except re.error:
750 750 globbed = [pat]
751 751 if globbed:
752 752 ret.extend(globbed)
753 753 continue
754 754 ret.append(kindpat)
755 755 return ret
756 756
757 757 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
758 758 badfn=None):
759 759 '''Return a matcher and the patterns that were used.
760 760 The matcher will warn about bad matches, unless an alternate badfn callback
761 761 is provided.'''
762 762 if pats == ("",):
763 763 pats = []
764 764 if opts is None:
765 765 opts = {}
766 766 if not globbed and default == 'relpath':
767 767 pats = expandpats(pats or [])
768 768
769 769 def bad(f, msg):
770 770 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
771 771
772 772 if badfn is None:
773 773 badfn = bad
774 774
775 775 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
776 776 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
777 777
778 778 if m.always():
779 779 pats = []
780 780 return m, pats
781 781
782 782 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
783 783 badfn=None):
784 784 '''Return a matcher that will warn about bad matches.'''
785 785 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
786 786
787 787 def matchall(repo):
788 788 '''Return a matcher that will efficiently match everything.'''
789 789 return matchmod.always(repo.root, repo.getcwd())
790 790
791 791 def matchfiles(repo, files, badfn=None):
792 792 '''Return a matcher that will efficiently match exactly these files.'''
793 793 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
794 794
795 795 def parsefollowlinespattern(repo, rev, pat, msg):
796 796 """Return a file name from `pat` pattern suitable for usage in followlines
797 797 logic.
798 798 """
799 799 if not matchmod.patkind(pat):
800 800 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
801 801 else:
802 802 ctx = repo[rev]
803 803 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
804 804 files = [f for f in ctx if m(f)]
805 805 if len(files) != 1:
806 806 raise error.ParseError(msg)
807 807 return files[0]
808 808
809 809 def origpath(ui, repo, filepath):
810 810 '''customize where .orig files are created
811 811
812 812 Fetch user defined path from config file: [ui] origbackuppath = <path>
813 813 Fall back to default (filepath with .orig suffix) if not specified
814 814 '''
815 815 origbackuppath = ui.config('ui', 'origbackuppath')
816 816 if not origbackuppath:
817 817 return filepath + ".orig"
818 818
819 819 # Convert filepath from an absolute path into a path inside the repo.
820 820 filepathfromroot = util.normpath(os.path.relpath(filepath,
821 821 start=repo.root))
822 822
823 823 origvfs = vfs.vfs(repo.wjoin(origbackuppath))
824 824 origbackupdir = origvfs.dirname(filepathfromroot)
825 825 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
826 826 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
827 827
828 828 # Remove any files that conflict with the backup file's path
829 829 for f in reversed(list(util.finddirs(filepathfromroot))):
830 830 if origvfs.isfileorlink(f):
831 831 ui.note(_('removing conflicting file: %s\n')
832 832 % origvfs.join(f))
833 833 origvfs.unlink(f)
834 834 break
835 835
836 836 origvfs.makedirs(origbackupdir)
837 837
838 838 if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
839 839 ui.note(_('removing conflicting directory: %s\n')
840 840 % origvfs.join(filepathfromroot))
841 841 origvfs.rmtree(filepathfromroot, forcibly=True)
842 842
843 843 return origvfs.join(filepathfromroot)
844 844
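As a sketch of the effect, with `[ui] origbackuppath = .hg/origbackups` configured, a caller backing up a file before overwriting it would do roughly the following (assumes existing `ui` and `repo`):

    target = origpath(ui, repo, repo.wjoin('foo'))
    # -> <repo>/.hg/origbackups/foo instead of the default <repo>/foo.orig
    util.rename(repo.wjoin('foo'), target)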
845 845 class _containsnode(object):
846 846 """proxy __contains__(node) to container.__contains__ which accepts revs"""
847 847
848 848 def __init__(self, repo, revcontainer):
849 849 self._torev = repo.changelog.rev
850 850 self._revcontains = revcontainer.__contains__
851 851
852 852 def __contains__(self, node):
853 853 return self._revcontains(self._torev(node))
854 854
855 855 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
856 856 fixphase=False, targetphase=None, backup=True):
857 857 """do common cleanups when old nodes are replaced by new nodes
858 858
859 859 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
860 860 (we might also want to move working directory parent in the future)
861 861
862 862 By default, bookmark moves are calculated automatically from 'replacements',
863 863 but 'moves' can be used to override that. Also, 'moves' may include
864 864 additional bookmark moves that should not have associated obsmarkers.
865 865
866 866 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
867 867 have replacements. operation is a string, like "rebase".
868 868
869 869 metadata is a dictionary containing metadata to be stored in an obsmarker if
870 870 obsolescence is enabled.
871 871 """
872 872 assert fixphase or targetphase is None
873 873 if not replacements and not moves:
874 874 return
875 875
876 876 # translate mapping's other forms
877 877 if not util.safehasattr(replacements, 'items'):
878 878 replacements = {n: () for n in replacements}
879 879
880 880 # Calculate bookmark movements
881 881 if moves is None:
882 882 moves = {}
883 883 # Unfiltered repo is needed since nodes in replacements might be hidden.
884 884 unfi = repo.unfiltered()
885 885 for oldnode, newnodes in replacements.items():
886 886 if oldnode in moves:
887 887 continue
888 888 if len(newnodes) > 1:
889 889 # usually a split, take the one with biggest rev number
890 890 newnode = next(unfi.set('max(%ln)', newnodes)).node()
891 891 elif len(newnodes) == 0:
892 892 # move bookmark backwards
893 893 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
894 894 list(replacements)))
895 895 if roots:
896 896 newnode = roots[0].node()
897 897 else:
898 898 newnode = nullid
899 899 else:
900 900 newnode = newnodes[0]
901 901 moves[oldnode] = newnode
902 902
903 903 allnewnodes = [n for ns in replacements.values() for n in ns]
904 904 toretract = {}
905 905 toadvance = {}
906 906 if fixphase:
907 907 precursors = {}
908 908 for oldnode, newnodes in replacements.items():
909 909 for newnode in newnodes:
910 910 precursors.setdefault(newnode, []).append(oldnode)
911 911
912 912 allnewnodes.sort(key=lambda n: unfi[n].rev())
913 913 newphases = {}
914 914 def phase(ctx):
915 915 return newphases.get(ctx.node(), ctx.phase())
916 916 for newnode in allnewnodes:
917 917 ctx = unfi[newnode]
918 918 parentphase = max(phase(p) for p in ctx.parents())
919 919 if targetphase is None:
920 920 oldphase = max(unfi[oldnode].phase()
921 921 for oldnode in precursors[newnode])
922 922 newphase = max(oldphase, parentphase)
923 923 else:
924 924 newphase = max(targetphase, parentphase)
925 925 newphases[newnode] = newphase
926 926 if newphase > ctx.phase():
927 927 toretract.setdefault(newphase, []).append(newnode)
928 928 elif newphase < ctx.phase():
929 929 toadvance.setdefault(newphase, []).append(newnode)
930 930
931 931 with repo.transaction('cleanup') as tr:
932 932 # Move bookmarks
933 933 bmarks = repo._bookmarks
934 934 bmarkchanges = []
935 935 for oldnode, newnode in moves.items():
936 936 oldbmarks = repo.nodebookmarks(oldnode)
937 937 if not oldbmarks:
938 938 continue
939 939 from . import bookmarks # avoid import cycle
940 940 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
941 941 (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
942 942 hex(oldnode), hex(newnode)))
943 943 # Delete divergent bookmarks being parents of related newnodes
944 944 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
945 945 allnewnodes, newnode, oldnode)
946 946 deletenodes = _containsnode(repo, deleterevs)
947 947 for name in oldbmarks:
948 948 bmarkchanges.append((name, newnode))
949 949 for b in bookmarks.divergent2delete(repo, deletenodes, name):
950 950 bmarkchanges.append((b, None))
951 951
952 952 if bmarkchanges:
953 953 bmarks.applychanges(repo, tr, bmarkchanges)
954 954
955 955 for phase, nodes in toretract.items():
956 956 phases.retractboundary(repo, tr, phase, nodes)
957 957 for phase, nodes in toadvance.items():
958 958 phases.advanceboundary(repo, tr, phase, nodes)
959 959
960 960 # Obsolete or strip nodes
961 961 if obsolete.isenabled(repo, obsolete.createmarkersopt):
962 962 # If a node is already obsoleted, and we want to obsolete it
963 963 # without a successor, skip that obsolete request since it's
964 964 # unnecessary. That's the "if s or not isobs(n)" check below.
965 965 # Also sort the nodes in topological order, which might be useful for
966 966 # some obsstore logic.
967 967 # NOTE: the filtering and sorting might belong to createmarkers.
968 968 isobs = unfi.obsstore.successors.__contains__
969 969 torev = unfi.changelog.rev
970 970 sortfunc = lambda ns: torev(ns[0])
971 971 rels = [(unfi[n], tuple(unfi[m] for m in s))
972 972 for n, s in sorted(replacements.items(), key=sortfunc)
973 973 if s or not isobs(n)]
974 974 if rels:
975 975 obsolete.createmarkers(repo, rels, operation=operation,
976 976 metadata=metadata)
977 977 else:
978 978 from . import repair # avoid import cycle
979 979 tostrip = list(replacements)
980 980 if tostrip:
981 981 repair.delayedstrip(repo.ui, repo, tostrip, operation,
982 982 backup=backup)
983 983
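A sketch of the common caller pattern after a history rewrite (`old` and `new` are assumed to be changelog node ids; taking the locks is the caller's responsibility):

    with repo.wlock(), repo.lock():
        cleanupnodes(repo, {old: (new,)}, operation='amend')
    # bookmarks on old move to new; old is obsoleted, or stripped when
    # obsolescence markers are disabled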
984 984 def addremove(repo, matcher, prefix, opts=None):
985 985 if opts is None:
986 986 opts = {}
987 987 m = matcher
988 988 dry_run = opts.get('dry_run')
989 989 try:
990 990 similarity = float(opts.get('similarity') or 0)
991 991 except ValueError:
992 992 raise error.Abort(_('similarity must be a number'))
993 993 if similarity < 0 or similarity > 100:
994 994 raise error.Abort(_('similarity must be between 0 and 100'))
995 995 similarity /= 100.0
996 996
997 997 ret = 0
998 998 join = lambda f: os.path.join(prefix, f)
999 999
1000 1000 wctx = repo[None]
1001 1001 for subpath in sorted(wctx.substate):
1002 1002 submatch = matchmod.subdirmatcher(subpath, m)
1003 1003 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
1004 1004 sub = wctx.sub(subpath)
1005 1005 try:
1006 1006 if sub.addremove(submatch, prefix, opts):
1007 1007 ret = 1
1008 1008 except error.LookupError:
1009 1009 repo.ui.status(_("skipping missing subrepository: %s\n")
1010 1010 % join(subpath))
1011 1011
1012 1012 rejected = []
1013 1013 def badfn(f, msg):
1014 1014 if f in m.files():
1015 1015 m.bad(f, msg)
1016 1016 rejected.append(f)
1017 1017
1018 1018 badmatch = matchmod.badmatch(m, badfn)
1019 1019 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
1020 1020 badmatch)
1021 1021
1022 1022 unknownset = set(unknown + forgotten)
1023 1023 toprint = unknownset.copy()
1024 1024 toprint.update(deleted)
1025 1025 for abs in sorted(toprint):
1026 1026 if repo.ui.verbose or not m.exact(abs):
1027 1027 if abs in unknownset:
1028 1028 status = _('adding %s\n') % m.uipath(abs)
1029 1029 label = 'addremove.added'
1030 1030 else:
1031 1031 status = _('removing %s\n') % m.uipath(abs)
1032 1032 label = 'addremove.removed'
1033 1033 repo.ui.status(status, label=label)
1034 1034
1035 1035 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1036 1036 similarity)
1037 1037
1038 1038 if not dry_run:
1039 1039 _markchanges(repo, unknown + forgotten, deleted, renames)
1040 1040
1041 1041 for f in rejected:
1042 1042 if f in m.files():
1043 1043 return 1
1044 1044 return ret
1045 1045
1046 1046 def marktouched(repo, files, similarity=0.0):
1047 1047 '''Assert that files have somehow been operated upon. files are relative to
1048 1048 the repo root.'''
1049 1049 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1050 1050 rejected = []
1051 1051
1052 1052 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1053 1053
1054 1054 if repo.ui.verbose:
1055 1055 unknownset = set(unknown + forgotten)
1056 1056 toprint = unknownset.copy()
1057 1057 toprint.update(deleted)
1058 1058 for abs in sorted(toprint):
1059 1059 if abs in unknownset:
1060 1060 status = _('adding %s\n') % abs
1061 1061 else:
1062 1062 status = _('removing %s\n') % abs
1063 1063 repo.ui.status(status)
1064 1064
1065 1065 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1066 1066 similarity)
1067 1067
1068 1068 _markchanges(repo, unknown + forgotten, deleted, renames)
1069 1069
1070 1070 for f in rejected:
1071 1071 if f in m.files():
1072 1072 return 1
1073 1073 return 0
1074 1074
1075 1075 def _interestingfiles(repo, matcher):
1076 1076 '''Walk dirstate with matcher, looking for files that addremove would care
1077 1077 about.
1078 1078
1079 1079 This is different from dirstate.status because it doesn't care about
1080 1080 whether files are modified or clean.'''
1081 1081 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1082 1082 audit_path = pathutil.pathauditor(repo.root, cached=True)
1083 1083
1084 1084 ctx = repo[None]
1085 1085 dirstate = repo.dirstate
1086 1086 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
1087 1087 unknown=True, ignored=False, full=False)
1088 1088 for abs, st in walkresults.iteritems():
1089 1089 dstate = dirstate[abs]
1090 1090 if dstate == '?' and audit_path.check(abs):
1091 1091 unknown.append(abs)
1092 1092 elif dstate != 'r' and not st:
1093 1093 deleted.append(abs)
1094 1094 elif dstate == 'r' and st:
1095 1095 forgotten.append(abs)
1096 1096 # for finding renames
1097 1097 elif dstate == 'r' and not st:
1098 1098 removed.append(abs)
1099 1099 elif dstate == 'a':
1100 1100 added.append(abs)
1101 1101
1102 1102 return added, unknown, deleted, removed, forgotten
1103 1103
1104 1104 def _findrenames(repo, matcher, added, removed, similarity):
1105 1105 '''Find renames from removed files to added ones.'''
1106 1106 renames = {}
1107 1107 if similarity > 0:
1108 1108 for old, new, score in similar.findrenames(repo, added, removed,
1109 1109 similarity):
1110 1110 if (repo.ui.verbose or not matcher.exact(old)
1111 1111 or not matcher.exact(new)):
1112 1112 repo.ui.status(_('recording removal of %s as rename to %s '
1113 1113 '(%d%% similar)\n') %
1114 1114 (matcher.rel(old), matcher.rel(new),
1115 1115 score * 100))
1116 1116 renames[new] = old
1117 1117 return renames
1118 1118
1119 1119 def _markchanges(repo, unknown, deleted, renames):
1120 1120 '''Marks the files in unknown as added, the files in deleted as removed,
1121 1121 and the files in renames as copied.'''
1122 1122 wctx = repo[None]
1123 1123 with repo.wlock():
1124 1124 wctx.forget(deleted)
1125 1125 wctx.add(unknown)
1126 1126 for new, old in renames.iteritems():
1127 1127 wctx.copy(old, new)
1128 1128
1129 1129 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1130 1130 """Update the dirstate to reflect the intent of copying src to dst. For
1131 1131 different reasons it might not end with dst being marked as copied from src.
1132 1132 """
1133 1133 origsrc = repo.dirstate.copied(src) or src
1134 1134 if dst == origsrc: # copying back a copy?
1135 1135 if repo.dirstate[dst] not in 'mn' and not dryrun:
1136 1136 repo.dirstate.normallookup(dst)
1137 1137 else:
1138 1138 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1139 1139 if not ui.quiet:
1140 1140 ui.warn(_("%s has not been committed yet, so no copy "
1141 1141 "data will be stored for %s.\n")
1142 1142 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1143 1143 if repo.dirstate[dst] in '?r' and not dryrun:
1144 1144 wctx.add([dst])
1145 1145 elif not dryrun:
1146 1146 wctx.copy(origsrc, dst)
1147 1147
1148 def readrequires(opener, supported):
1149 '''Reads and parses .hg/requires and checks if all entries found
1150 are in the list of supported features.'''
1151 requirements = set(opener.read("requires").splitlines())
1152 missings = []
1153 for r in requirements:
1154 if r not in supported:
1155 if not r or not r[0:1].isalnum():
1156 raise error.RequirementError(_(".hg/requires file is corrupt"))
1157 missings.append(r)
1158 missings.sort()
1159 if missings:
1160 raise error.RequirementError(
1161 _("repository requires features unknown to this Mercurial: %s")
1162 % " ".join(missings),
1163 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1164 " for more information"))
1165 return requirements
1166
1167 1148 def writerequires(opener, requirements):
1168 1149 with opener('requires', 'w') as fp:
1169 1150 for r in sorted(requirements):
1170 1151 fp.write("%s\n" % r)
1171 1152
1172 1153 class filecachesubentry(object):
1173 1154 def __init__(self, path, stat):
1174 1155 self.path = path
1175 1156 self.cachestat = None
1176 1157 self._cacheable = None
1177 1158
1178 1159 if stat:
1179 1160 self.cachestat = filecachesubentry.stat(self.path)
1180 1161
1181 1162 if self.cachestat:
1182 1163 self._cacheable = self.cachestat.cacheable()
1183 1164 else:
1184 1165 # None means we don't know yet
1185 1166 self._cacheable = None
1186 1167
1187 1168 def refresh(self):
1188 1169 if self.cacheable():
1189 1170 self.cachestat = filecachesubentry.stat(self.path)
1190 1171
1191 1172 def cacheable(self):
1192 1173 if self._cacheable is not None:
1193 1174 return self._cacheable
1194 1175
1195 1176 # we don't know yet, assume it is for now
1196 1177 return True
1197 1178
1198 1179 def changed(self):
1199 1180 # no point in going further if we can't cache it
1200 1181 if not self.cacheable():
1201 1182 return True
1202 1183
1203 1184 newstat = filecachesubentry.stat(self.path)
1204 1185
1205 1186 # we may not know if it's cacheable yet, check again now
1206 1187 if newstat and self._cacheable is None:
1207 1188 self._cacheable = newstat.cacheable()
1208 1189
1209 1190 # check again
1210 1191 if not self._cacheable:
1211 1192 return True
1212 1193
1213 1194 if self.cachestat != newstat:
1214 1195 self.cachestat = newstat
1215 1196 return True
1216 1197 else:
1217 1198 return False
1218 1199
1219 1200 @staticmethod
1220 1201 def stat(path):
1221 1202 try:
1222 1203 return util.cachestat(path)
1223 1204 except OSError as e:
1224 1205 if e.errno != errno.ENOENT:
1225 1206 raise
1226 1207
1227 1208 class filecacheentry(object):
1228 1209 def __init__(self, paths, stat=True):
1229 1210 self._entries = []
1230 1211 for path in paths:
1231 1212 self._entries.append(filecachesubentry(path, stat))
1232 1213
1233 1214 def changed(self):
1234 1215 '''true if any entry has changed'''
1235 1216 for entry in self._entries:
1236 1217 if entry.changed():
1237 1218 return True
1238 1219 return False
1239 1220
1240 1221 def refresh(self):
1241 1222 for entry in self._entries:
1242 1223 entry.refresh()
1243 1224
1244 1225 class filecache(object):
1245 1226 """A property-like decorator that tracks files under .hg/ for updates.
1246 1227
1247 1228 On first access, the files defined as arguments are stat()ed and the
1248 1229 results cached. The decorated function is called. The results are stashed
1249 1230 away in a ``_filecache`` dict on the object whose method is decorated.
1250 1231
1251 1232 On subsequent access, the cached result is returned.
1252 1233
1253 1234 On external property set operations, stat() calls are performed and the new
1254 1235 value is cached.
1255 1236
1256 1237 On property delete operations, cached data is removed.
1257 1238
1258 1239 When using the property API, cached data is always returned, if available:
1259 1240 no stat() is performed to check if the file has changed and if the function
1260 1241 needs to be called to reflect file changes.
1261 1242
1262 1243 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1263 1244 can populate an entry before the property's getter is called. In this case,
1264 1245 entries in ``_filecache`` will be used during property operations,
1265 1246 if available. If the underlying file changes, it is up to external callers
1266 1247 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1267 1248 method result as well as possibly calling ``del obj._filecache[attr]`` to
1268 1249 remove the ``filecacheentry``.
1269 1250 """
1270 1251
1271 1252 def __init__(self, *paths):
1272 1253 self.paths = paths
1273 1254
1274 1255 def join(self, obj, fname):
1275 1256 """Used to compute the runtime path of a cached file.
1276 1257
1277 1258 Users should subclass filecache and provide their own version of this
1278 1259 function to call the appropriate join function on 'obj' (an instance
1279 1260 of the class whose member function was decorated).
1280 1261 """
1281 1262 raise NotImplementedError
1282 1263
1283 1264 def __call__(self, func):
1284 1265 self.func = func
1285 1266 self.sname = func.__name__
1286 1267 self.name = pycompat.sysbytes(self.sname)
1287 1268 return self
1288 1269
1289 1270 def __get__(self, obj, type=None):
1290 1271 # if accessed on the class, return the descriptor itself.
1291 1272 if obj is None:
1292 1273 return self
1293 1274 # do we need to check if the file changed?
1294 1275 if self.sname in obj.__dict__:
1295 1276 assert self.name in obj._filecache, self.name
1296 1277 return obj.__dict__[self.sname]
1297 1278
1298 1279 entry = obj._filecache.get(self.name)
1299 1280
1300 1281 if entry:
1301 1282 if entry.changed():
1302 1283 entry.obj = self.func(obj)
1303 1284 else:
1304 1285 paths = [self.join(obj, path) for path in self.paths]
1305 1286
1306 1287 # We stat -before- creating the object so our cache doesn't lie if
1307 1288 # a writer modified between the time we read and stat
1308 1289 entry = filecacheentry(paths, True)
1309 1290 entry.obj = self.func(obj)
1310 1291
1311 1292 obj._filecache[self.name] = entry
1312 1293
1313 1294 obj.__dict__[self.sname] = entry.obj
1314 1295 return entry.obj
1315 1296
1316 1297 def __set__(self, obj, value):
1317 1298 if self.name not in obj._filecache:
1318 1299 # we add an entry for the missing value because X in __dict__
1319 1300 # implies X in _filecache
1320 1301 paths = [self.join(obj, path) for path in self.paths]
1321 1302 ce = filecacheentry(paths, False)
1322 1303 obj._filecache[self.name] = ce
1323 1304 else:
1324 1305 ce = obj._filecache[self.name]
1325 1306
1326 1307 ce.obj = value # update cached copy
1327 1308 obj.__dict__[self.sname] = value # update copy returned by obj.x
1328 1309
1329 1310 def __delete__(self, obj):
1330 1311 try:
1331 1312 del obj.__dict__[self.sname]
1332 1313 except KeyError:
1333 1314 raise AttributeError(self.sname)
1334 1315
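A minimal usage sketch mirroring how repository objects employ the decorator (the subclass and `_parsebookmarks` helper are illustrative, not Mercurial API):

    class repofilecache(filecache):
        def join(self, obj, fname):
            return obj.vfs.join(fname)  # resolve tracked paths against .hg/

    class somerepo(object):
        def __init__(self, vfs):
            self.vfs = vfs
            self._filecache = {}

        @repofilecache('bookmarks')
        def _bookmarks(self):
            # recomputed only when the stat() of .hg/bookmarks changes
            return _parsebookmarks(self.vfs)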
1335 1316 def extdatasource(repo, source):
1336 1317 """Gather a map of rev -> value dict from the specified source
1337 1318
1338 1319 A source spec is treated as a URL, with a special case shell: type
1339 1320 for parsing the output from a shell command.
1340 1321
1341 1322 The data is parsed as a series of newline-separated records where
1342 1323 each record is a revision specifier optionally followed by a space
1343 1324 and a freeform string value. If the revision is known locally, it
1344 1325 is converted to a rev, otherwise the record is skipped.
1345 1326
1346 1327 Note that both key and value are treated as UTF-8 and converted to
1347 1328 the local encoding. This allows uniformity between local and
1348 1329 remote data sources.
1349 1330 """
1350 1331
1351 1332 spec = repo.ui.config("extdata", source)
1352 1333 if not spec:
1353 1334 raise error.Abort(_("unknown extdata source '%s'") % source)
1354 1335
1355 1336 data = {}
1356 1337 src = proc = None
1357 1338 try:
1358 1339 if spec.startswith("shell:"):
1359 1340 # external commands should be run relative to the repo root
1360 1341 cmd = spec[6:]
1361 1342 proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
1362 1343 close_fds=procutil.closefds,
1363 1344 stdout=subprocess.PIPE, cwd=repo.root)
1364 1345 src = proc.stdout
1365 1346 else:
1366 1347 # treat as a URL or file
1367 1348 src = url.open(repo.ui, spec)
1368 1349 for l in src:
1369 1350 if " " in l:
1370 1351 k, v = l.strip().split(" ", 1)
1371 1352 else:
1372 1353 k, v = l.strip(), ""
1373 1354
1374 1355 k = encoding.tolocal(k)
1375 1356 try:
1376 1357 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1377 1358 except (error.LookupError, error.RepoLookupError):
1378 1359 pass # we ignore data for nodes that don't exist locally
1379 1360 finally:
1380 1361 if proc:
1381 1362 proc.communicate()
1382 1363 if src:
1383 1364 src.close()
1384 1365 if proc and proc.returncode != 0:
1385 1366 raise error.Abort(_("extdata command '%s' failed: %s")
1386 1367 % (cmd, procutil.explainexit(proc.returncode)))
1387 1368
1388 1369 return data
1389 1370
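For example, with an illustrative `[extdata] bugmap = shell:cat .hg/bugmap` entry in hgrc, the map is consulted like this (sketch; assumes an existing `repo`):

    data = extdatasource(repo, 'bugmap')  # {rev: value, ...}, local encoding
    label = data.get(repo['tip'].rev(), '')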
1390 1371 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1391 1372 if lock is None:
1392 1373 raise error.LockInheritanceContractViolation(
1393 1374 'lock can only be inherited while held')
1394 1375 if environ is None:
1395 1376 environ = {}
1396 1377 with lock.inherit() as locker:
1397 1378 environ[envvar] = locker
1398 1379 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1399 1380
1400 1381 def wlocksub(repo, cmd, *args, **kwargs):
1401 1382 """run cmd as a subprocess that allows inheriting repo's wlock
1402 1383
1403 1384 This can only be called while the wlock is held. This takes all the
1404 1385 arguments that ui.system does, and returns the exit code of the
1405 1386 subprocess."""
1406 1387 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1407 1388 **kwargs)
1408 1389
1409 1390 class progress(object):
1410 1391 def __init__(self, ui, topic, unit="", total=None):
1411 1392 self.ui = ui
1412 1393 self.pos = 0
1413 1394 self.topic = topic
1414 1395 self.unit = unit
1415 1396 self.total = total
1416 1397
1417 1398 def __enter__(self):
1418 1399 return self
1419 1400
1420 1401 def __exit__(self, exc_type, exc_value, exc_tb):
1421 1402 self.complete()
1422 1403
1423 1404 def update(self, pos, item="", total=None):
1424 1405 assert pos is not None
1425 1406 if total:
1426 1407 self.total = total
1427 1408 self.pos = pos
1428 1409 self._print(item)
1429 1410
1430 1411 def increment(self, step=1, item="", total=None):
1431 1412 self.update(self.pos + step, item, total)
1432 1413
1433 1414 def complete(self):
1434 1415 self.ui.progress(self.topic, None)
1435 1416
1436 1417 def _print(self, item):
1437 1418 self.ui.progress(self.topic, self.pos, item, self.unit,
1438 1419 self.total)
1439 1420
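A usage sketch of the context-manager API (`files` and `process` are assumed):

    with progress(ui, 'processing', unit='files', total=len(files)) as prog:
        for f in files:
            prog.increment(item=f)
            process(f)
    # __exit__ calls complete(), which clears the progress bar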
1440 1421 def gdinitconfig(ui):
1441 1422 """helper function to know if a repo should be created as general delta
1442 1423 """
1443 1424 # experimental config: format.generaldelta
1444 1425 return (ui.configbool('format', 'generaldelta')
1445 1426 or ui.configbool('format', 'usegeneraldelta')
1446 1427 or ui.configbool('format', 'sparse-revlog'))
1447 1428
1448 1429 def gddeltaconfig(ui):
1449 1430 """helper function to know if incoming delta should be optimised
1450 1431 """
1451 1432 # experimental config: format.generaldelta
1452 1433 return ui.configbool('format', 'generaldelta')
1453 1434
1454 1435 class simplekeyvaluefile(object):
1455 1436 """A simple file with key=value lines
1456 1437
1457 1438 Keys must be alphanumerics and start with a letter, values must not
1458 1439 contain '\n' characters"""
1459 1440 firstlinekey = '__firstline'
1460 1441
1461 1442 def __init__(self, vfs, path, keys=None):
1462 1443 self.vfs = vfs
1463 1444 self.path = path
1464 1445
1465 1446 def read(self, firstlinenonkeyval=False):
1466 1447 """Read the contents of a simple key-value file
1467 1448
1468 1449 'firstlinenonkeyval' indicates whether the first line of file should
1469 1450 be treated as a key-value pair or returned fully under the
1470 1451 __firstline key."""
1471 1452 lines = self.vfs.readlines(self.path)
1472 1453 d = {}
1473 1454 if firstlinenonkeyval:
1474 1455 if not lines:
1475 1456 e = _("empty simplekeyvalue file")
1476 1457 raise error.CorruptedState(e)
1477 1458 # we don't want to include '\n' in the __firstline
1478 1459 d[self.firstlinekey] = lines[0][:-1]
1479 1460 del lines[0]
1480 1461
1481 1462 try:
1482 1463 # the 'if line.strip()' part prevents us from failing on empty
1483 1464 # lines which only contain '\n' and are therefore not skipped
1484 1465 # by 'if line'
1485 1466 updatedict = dict(line[:-1].split('=', 1) for line in lines
1486 1467 if line.strip())
1487 1468 if self.firstlinekey in updatedict:
1488 1469 e = _("%r can't be used as a key")
1489 1470 raise error.CorruptedState(e % self.firstlinekey)
1490 1471 d.update(updatedict)
1491 1472 except ValueError as e:
1492 1473 raise error.CorruptedState(str(e))
1493 1474 return d
1494 1475
1495 1476 def write(self, data, firstline=None):
1496 1477 """Write key=>value mapping to a file
1497 1478 data is a dict. Keys must be alphanumerical and start with a letter.
1498 1479 Values must not contain newline characters.
1499 1480
1500 1481 If 'firstline' is not None, it is written to file before
1501 1482 everything else, as it is, not in a key=value form"""
1502 1483 lines = []
1503 1484 if firstline is not None:
1504 1485 lines.append('%s\n' % firstline)
1505 1486
1506 1487 for k, v in data.items():
1507 1488 if k == self.firstlinekey:
1508 1489 e = "key name '%s' is reserved" % self.firstlinekey
1509 1490 raise error.ProgrammingError(e)
1510 1491 if not k[0:1].isalpha():
1511 1492 e = "keys must start with a letter in a key-value file"
1512 1493 raise error.ProgrammingError(e)
1513 1494 if not k.isalnum():
1514 1495 e = "invalid key name in a simple key-value file"
1515 1496 raise error.ProgrammingError(e)
1516 1497 if '\n' in v:
1517 1498 e = "invalid value in a simple key-value file"
1518 1499 raise error.ProgrammingError(e)
1519 1500 lines.append("%s=%s\n" % (k, v))
1520 1501 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1521 1502 fp.write(''.join(lines))
1522 1503
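A round-trip sketch (assumes a vfs such as `repo.vfs`; the file name is illustrative):

    f = simplekeyvaluefile(repo.vfs, 'mystate')
    f.write({'name': 'default', 'branch': 'stable'}, firstline='1')
    d = f.read(firstlinenonkeyval=True)
    # d == {'__firstline': '1', 'name': 'default', 'branch': 'stable'}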
1523 1504 _reportobsoletedsource = [
1524 1505 'debugobsolete',
1525 1506 'pull',
1526 1507 'push',
1527 1508 'serve',
1528 1509 'unbundle',
1529 1510 ]
1530 1511
1531 1512 _reportnewcssource = [
1532 1513 'pull',
1533 1514 'unbundle',
1534 1515 ]
1535 1516
1536 1517 def prefetchfiles(repo, revs, match):
1537 1518 """Invokes the registered file prefetch functions, allowing extensions to
1538 1519 ensure the corresponding files are available locally, before the command
1539 1520 uses them."""
1540 1521 if match:
1541 1522 # The command itself will complain about files that don't exist, so
1542 1523 # don't duplicate the message.
1543 1524 match = matchmod.badmatch(match, lambda fn, msg: None)
1544 1525 else:
1545 1526 match = matchall(repo)
1546 1527
1547 1528 fileprefetchhooks(repo, revs, match)
1548 1529
1549 1530 # a list of (repo, revs, match) prefetch functions
1550 1531 fileprefetchhooks = util.hooks()
1551 1532
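# A hedged sketch of how an extension might hook into prefetching; the
# extension name 'myext' and the helper '_prefetch' are made up:
#
#   def _prefetch(repo, revs, match):
#       # fetch the contents of files matched by 'match' in 'revs' so
#       # that later accesses are served locally
#       ...
#
#   scmutil.fileprefetchhooks.add('myext', _prefetch)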
1552 1533 # A marker that tells the evolve extension to suppress its own reporting
1553 1534 _reportstroubledchangesets = True
1554 1535
1555 1536 def registersummarycallback(repo, otr, txnname=''):
1556 1537 """register a callback to issue a summary after the transaction is closed
1557 1538 """
1558 1539 def txmatch(sources):
1559 1540 return any(txnname.startswith(source) for source in sources)
1560 1541
1561 1542 categories = []
1562 1543
1563 1544 def reportsummary(func):
1564 1545 """decorator for report callbacks."""
1565 1546 # The repoview life cycle is shorter than that of the actual
1566 1547 # underlying repository, so the filtered object can die before the
1567 1548 # weakref is used, leading to trouble. We keep a reference to the
1568 1549 # unfiltered object and restore the filtering when retrieving the
1569 1550 # repository through the weakref.
1570 1551 filtername = repo.filtername
1571 1552 reporef = weakref.ref(repo.unfiltered())
1572 1553 def wrapped(tr):
1573 1554 repo = reporef()
1574 1555 if filtername:
1575 1556 repo = repo.filtered(filtername)
1576 1557 func(repo, tr)
1577 1558 newcat = '%02i-txnreport' % len(categories)
1578 1559 otr.addpostclose(newcat, wrapped)
1579 1560 categories.append(newcat)
1580 1561 return wrapped
1581 1562
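# note: the zero-padded '%02i-txnreport' category names make the report
# callbacks run in registration order, as the transaction invokes
# post-close callbacks sorted by category name.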
1582 1563 if txmatch(_reportobsoletedsource):
1583 1564 @reportsummary
1584 1565 def reportobsoleted(repo, tr):
1585 1566 obsoleted = obsutil.getobsoleted(repo, tr)
1586 1567 if obsoleted:
1587 1568 repo.ui.status(_('obsoleted %i changesets\n')
1588 1569 % len(obsoleted))
1589 1570
1590 1571 if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
1591 1572 repo.ui.configbool('experimental', 'evolution.report-instabilities')):
1592 1573 instabilitytypes = [
1593 1574 ('orphan', 'orphan'),
1594 1575 ('phase-divergent', 'phasedivergent'),
1595 1576 ('content-divergent', 'contentdivergent'),
1596 1577 ]
1597 1578
1598 1579 def getinstabilitycounts(repo):
1599 1580 filtered = repo.changelog.filteredrevs
1600 1581 counts = {}
1601 1582 for instability, revset in instabilitytypes:
1602 1583 counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
1603 1584 filtered)
1604 1585 return counts
1605 1586
1606 1587 oldinstabilitycounts = getinstabilitycounts(repo)
1607 1588 @reportsummary
1608 1589 def reportnewinstabilities(repo, tr):
1609 1590 newinstabilitycounts = getinstabilitycounts(repo)
1610 1591 for instability, revset in instabilitytypes:
1611 1592 delta = (newinstabilitycounts[instability] -
1612 1593 oldinstabilitycounts[instability])
1613 1594 msg = getinstabilitymessage(delta, instability)
1614 1595 if msg:
1615 1596 repo.ui.warn(msg)
1616 1597
1617 1598 if txmatch(_reportnewcssource):
1618 1599 @reportsummary
1619 1600 def reportnewcs(repo, tr):
1620 1601 """Report the range of new revisions pulled/unbundled."""
1621 1602 origrepolen = tr.changes.get('origrepolen', len(repo))
1622 1603 if origrepolen >= len(repo):
1623 1604 return
1624 1605
1625 1606 # Compute the bounds of new revisions' range, excluding obsoletes.
1626 1607 unfi = repo.unfiltered()
1627 1608 revs = unfi.revs('%d: and not obsolete()', origrepolen)
1628 1609 if not revs:
1629 1610 # Got only obsoletes.
1630 1611 return
1631 1612 minrev, maxrev = repo[revs.min()], repo[revs.max()]
1632 1613
1633 1614 if minrev == maxrev:
1634 1615 revrange = minrev
1635 1616 else:
1636 1617 revrange = '%s:%s' % (minrev, maxrev)
1637 1618 draft = len(repo.revs('%ld and draft()', revs))
1638 1619 secret = len(repo.revs('%ld and secret()', revs))
1639 1620 if not (draft or secret):
1640 1621 msg = _('new changesets %s\n') % revrange
1641 1622 elif draft and secret:
1642 1623 msg = _('new changesets %s (%d drafts, %d secrets)\n')
1643 1624 msg %= (revrange, draft, secret)
1644 1625 elif draft:
1645 1626 msg = _('new changesets %s (%d drafts)\n')
1646 1627 msg %= (revrange, draft)
1647 1628 elif secret:
1648 1629 msg = _('new changesets %s (%d secrets)\n')
1649 1630 msg %= (revrange, secret)
1650 1631 else:
1651 1632 raise error.ProgrammingError('entered unreachable condition')
1652 1633 repo.ui.status(msg)
1653 1634
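# Example output from reportnewcs (illustrative; the hashes are made up):
#
#   new changesets 3a2b6fc6a2f3:9f8e1c2d4b5a (2 drafts)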
1654 1635 @reportsummary
1655 1636 def reportphasechanges(repo, tr):
1656 1637 """Report statistics of phase changes for changesets that pre-existed
1657 1638 the pull/unbundle.
1658 1639 """
1659 1640 origrepolen = tr.changes.get('origrepolen', len(repo))
1660 1641 phasetracking = tr.changes.get('phases', {})
1661 1642 if not phasetracking:
1662 1643 return
1663 1644 published = [
1664 1645 rev for rev, (old, new) in phasetracking.iteritems()
1665 1646 if new == phases.public and rev < origrepolen
1666 1647 ]
1667 1648 if not published:
1668 1649 return
1669 1650 repo.ui.status(_('%d local changesets published\n')
1670 1651 % len(published))
1671 1652
1672 1653 def getinstabilitymessage(delta, instability):
1673 1654 """return the message warning the user about new instabilities
1674 1655
1675 1656 exists as a separate function so that extensions can wrap it to show more
1676 1657 information, like how to fix instabilities"""
1677 1658 if delta > 0:
1678 1659 return _('%i new %s changesets\n') % (delta, instability)
1679 1660
1680 1661 def nodesummaries(repo, nodes, maxnumnodes=4):
1681 1662 if len(nodes) <= maxnumnodes or repo.ui.verbose:
1682 1663 return ' '.join(short(h) for h in nodes)
1683 1664 first = ' '.join(short(h) for h in nodes[:maxnumnodes])
1684 1665 return _("%s and %d others") % (first, len(nodes) - maxnumnodes)
1685 1666
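# Example (illustrative, with made-up hashes): for six nodes and the default
# maxnumnodes=4, nodesummaries() returns something like
# '1f0dee64 2b2f4f21 6a3b5ff0 8c3f9d22 and 2 others'.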
1686 1667 def enforcesinglehead(repo, tr, desc):
1687 1668 """check that no named branch has multiple heads"""
1688 1669 if desc in ('strip', 'repair'):
1689 1670 # skip the logic during strip
1690 1671 return
1691 1672 visible = repo.filtered('visible')
1692 1673 # possible improvement: we could restrict the check to the affected branch
1693 1674 for name, heads in visible.branchmap().iteritems():
1694 1675 if len(heads) > 1:
1695 1676 msg = _('rejecting multiple heads on branch "%s"')
1696 1677 msg %= name
1697 1678 hint = _('%d heads: %s')
1698 1679 hint %= (len(heads), nodesummaries(repo, heads))
1699 1680 raise error.Abort(msg, hint=hint)
1700 1681
1701 1682 def wrapconvertsink(sink):
1702 1683 """Allow extensions to wrap the sink returned by convcmd.convertsink()
1703 1684 before it is used, whether or not the convert extension was formally loaded.
1704 1685 """
1705 1686 return sink
1706 1687
1707 1688 def unhidehashlikerevs(repo, specs, hiddentype):
1708 1689 """parse the user specs and unhide changesets whose hash or revision number
1709 1690 is passed.
1710 1691
1711 1692 hiddentype can be: 1) 'warn': warn while unhiding changesets
1712 1693 2) 'nowarn': don't warn while unhiding changesets
1713 1694
1714 1695 returns a repo object with the required changesets unhidden
1715 1696 """
1716 1697 if not repo.filtername or not repo.ui.configbool('experimental',
1717 1698 'directaccess'):
1718 1699 return repo
1719 1700
1720 1701 if repo.filtername not in ('visible', 'visible-hidden'):
1721 1702 return repo
1722 1703
1723 1704 symbols = set()
1724 1705 for spec in specs:
1725 1706 try:
1726 1707 tree = revsetlang.parse(spec)
1727 1708 except error.ParseError: # will be reported by scmutil.revrange()
1728 1709 continue
1729 1710
1730 1711 symbols.update(revsetlang.gethashlikesymbols(tree))
1731 1712
1732 1713 if not symbols:
1733 1714 return repo
1734 1715
1735 1716 revs = _getrevsfromsymbols(repo, symbols)
1736 1717
1737 1718 if not revs:
1738 1719 return repo
1739 1720
1740 1721 if hiddentype == 'warn':
1741 1722 unfi = repo.unfiltered()
1742 1723 revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
1743 1724 repo.ui.warn(_("warning: accessing hidden changesets for write "
1744 1725 "operation: %s\n") % revstr)
1745 1726
1746 1727 # we have to use a new filtername to separate the branch/tags caches
1747 1728 # until we can disable these caches when revisions are dynamically pinned.
1748 1729 return repo.filtered('visible-hidden', revs)
1749 1730
1750 1731 def _getrevsfromsymbols(repo, symbols):
1751 1732 """parse the list of symbols and return a set of revision numbers of the
1752 1733 hidden changesets present in symbols"""
1753 1734 revs = set()
1754 1735 unfi = repo.unfiltered()
1755 1736 unficl = unfi.changelog
1756 1737 cl = repo.changelog
1757 1738 tiprev = len(unficl)
1758 1739 allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
1759 1740 for s in symbols:
1760 1741 try:
1761 1742 n = int(s)
1762 1743 if n <= tiprev:
1763 1744 if not allowrevnums:
1764 1745 continue
1765 1746 else:
1766 1747 if n not in cl:
1767 1748 revs.add(n)
1768 1749 continue
1769 1750 except ValueError:
1770 1751 pass
1771 1752
1772 1753 try:
1773 1754 s = resolvehexnodeidprefix(unfi, s)
1774 1755 except (error.LookupError, error.WdirUnsupported):
1775 1756 s = None
1776 1757
1777 1758 if s is not None:
1778 1759 rev = unficl.rev(s)
1779 1760 if rev not in cl:
1780 1761 revs.add(rev)
1781 1762
1782 1763 return revs
1783 1764
1784 1765 def bookmarkrevs(repo, mark):
1785 1766 """
1786 1767 Select revisions reachable by a given bookmark
1787 1768 """
1788 1769 return repo.revs("ancestors(bookmark(%s)) - "
1789 1770 "ancestors(head() and not bookmark(%s)) - "
1790 1771 "ancestors(bookmark() and not bookmark(%s))",
1791 1772 mark, mark, mark)
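# Reading aid for the revset above (no new behavior implied): starting from
# all ancestors of the given bookmark, it subtracts changesets reachable
# from heads that do not carry the bookmark and from other bookmarks,
# leaving roughly the line of development owned by that bookmark.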
@@ -1,221 +1,224 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 from __future__ import absolute_import
11 11
12 12 import errno
13 13
14 14 from .i18n import _
15 15 from . import (
16 16 changelog,
17 17 error,
18 18 localrepo,
19 19 manifest,
20 20 namespaces,
21 21 pathutil,
22 scmutil,
23 22 store,
24 23 url,
25 24 util,
26 25 vfs as vfsmod,
27 26 )
28 27
29 28 urlerr = util.urlerr
30 29 urlreq = util.urlreq
31 30
32 31 class httprangereader(object):
33 32 def __init__(self, url, opener):
34 33 # we assume opener has HTTPRangeHandler
35 34 self.url = url
36 35 self.pos = 0
37 36 self.opener = opener
38 37 self.name = url
39 38
40 39 def __enter__(self):
41 40 return self
42 41
43 42 def __exit__(self, exc_type, exc_value, traceback):
44 43 self.close()
45 44
46 45 def seek(self, pos):
47 46 self.pos = pos
48 47 def read(self, bytes=None):
49 48 req = urlreq.request(self.url)
50 49 end = ''
51 50 if bytes:
52 51 end = self.pos + bytes - 1
53 52 if self.pos or end:
54 53 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
55 54
56 55 try:
57 56 f = self.opener.open(req)
58 57 data = f.read()
59 58 code = f.code
60 59 except urlerr.httperror as inst:
61 60 num = inst.code == 404 and errno.ENOENT or None
62 61 raise IOError(num, inst)
63 62 except urlerr.urlerror as inst:
64 63 raise IOError(None, inst.reason[1])
65 64
66 65 if code == 200:
67 66 # HTTPRangeHandler does nothing if remote does not support
68 67 # Range headers and returns the full entity. Let's slice it.
69 68 if bytes:
70 69 data = data[self.pos:self.pos + bytes]
71 70 else:
72 71 data = data[self.pos:]
73 72 elif bytes:
74 73 data = data[:bytes]
75 74 self.pos += len(data)
76 75 return data
77 76 def readlines(self):
78 77 return self.read().splitlines(True)
79 78 def __iter__(self):
80 79 return iter(self.readlines())
81 80 def close(self):
82 81 pass
83 82
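# Example (illustrative): after seek(100), read(20) sends a request with
# 'Range: bytes=100-119'; on a 206 reply it returns those 20 bytes and
# advances self.pos to 120, while a 200 reply (server ignored the Range
# header) is sliced locally to the same window.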
84 83 # _RangeError and _HTTPRangeHandler were originally in byterange.py,
85 84 # which was itself extracted from urlgrabber. See the last version of
86 85 # byterange.py from history if you need more information.
87 86 class _RangeError(IOError):
88 87 """Error raised when an unsatisfiable range is requested."""
89 88
90 89 class _HTTPRangeHandler(urlreq.basehandler):
91 90 """Handler that enables HTTP Range headers.
92 91
93 92 This is extremely simple. The Range header is an HTTP feature to
94 93 begin with, so all this class does is tell urllib2 that the
95 94 "206 Partial Content" response from the HTTP server is what we
96 95 expected.
97 96 """
98 97
99 98 def http_error_206(self, req, fp, code, msg, hdrs):
100 99 # 206 Partial Content Response
101 100 r = urlreq.addinfourl(fp, hdrs, req.get_full_url())
102 101 r.code = code
103 102 r.msg = msg
104 103 return r
105 104
106 105 def http_error_416(self, req, fp, code, msg, hdrs):
107 106 # HTTP's Range Not Satisfiable error
108 107 raise _RangeError('Requested Range Not Satisfiable')
109 108
110 109 def build_opener(ui, authinfo):
111 110 # urllib cannot handle URLs with embedded user or passwd
112 111 urlopener = url.opener(ui, authinfo)
113 112 urlopener.add_handler(_HTTPRangeHandler())
114 113
115 114 class statichttpvfs(vfsmod.abstractvfs):
116 115 def __init__(self, base):
117 116 self.base = base
118 117
119 118 def __call__(self, path, mode='r', *args, **kw):
120 119 if mode not in ('r', 'rb'):
121 120 raise IOError('Permission denied')
122 121 f = "/".join((self.base, urlreq.quote(path)))
123 122 return httprangereader(f, urlopener)
124 123
125 124 def join(self, path):
126 125 if path:
127 126 return pathutil.join(self.base, path)
128 127 else:
129 128 return self.base
130 129
131 130 return statichttpvfs
132 131
133 132 class statichttppeer(localrepo.localpeer):
134 133 def local(self):
135 134 return None
136 135 def canpush(self):
137 136 return False
138 137
139 138 class statichttprepository(localrepo.localrepository):
140 139 supported = localrepo.localrepository._basesupported
141 140
142 141 def __init__(self, ui, path):
143 142 self._url = path
144 143 self.ui = ui
145 144
146 145 self.root = path
147 146 u = util.url(path.rstrip('/') + "/.hg")
148 147 self.path, authinfo = u.authinfo()
149 148
150 149 vfsclass = build_opener(ui, authinfo)
151 150 self.vfs = vfsclass(self.path)
152 151 self.cachevfs = vfsclass(self.vfs.join('cache'))
153 152 self._phasedefaults = []
154 153
155 154 self.names = namespaces.namespaces()
156 155 self.filtername = None
157 156
158 157 try:
159 requirements = scmutil.readrequires(self.vfs, self.supported)
158 requirements = set(self.vfs.read(b'requires').splitlines())
160 159 except IOError as inst:
161 160 if inst.errno != errno.ENOENT:
162 161 raise
163 162 requirements = set()
164 163
165 164 # check if it is a non-empty old-style repository
166 165 try:
167 166 fp = self.vfs("00changelog.i")
168 167 fp.read(1)
169 168 fp.close()
170 169 except IOError as inst:
171 170 if inst.errno != errno.ENOENT:
172 171 raise
173 172 # we do not care about empty old-style repositories here
174 173 msg = _("'%s' does not appear to be an hg repository") % path
175 174 raise error.RepoError(msg)
176 175
176 supportedrequirements = localrepo.gathersupportedrequirements(ui)
177 localrepo.ensurerequirementsrecognized(requirements,
178 supportedrequirements)
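# ensurerequirementsrecognized() raises a RepoError if the repository
# lists a requirement this Mercurial does not know about, so unsupported
# repositories fail early here instead of during store access.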
179
177 180 # setup store
178 181 self.store = store.store(requirements, self.path, vfsclass)
179 182 self.spath = self.store.path
180 183 self.svfs = self.store.opener
181 184 self.sjoin = self.store.join
182 185 self._filecache = {}
183 186 self.requirements = requirements
184 187
185 188 self.manifestlog = manifest.manifestlog(self.svfs, self)
186 189 self.changelog = changelog.changelog(self.svfs)
187 190 self._tags = None
188 191 self.nodetagscache = None
189 192 self._branchcaches = {}
190 193 self._revbranchcache = None
191 194 self.encodepats = None
192 195 self.decodepats = None
193 196 self._transref = None
194 197
195 198 def _restrictcapabilities(self, caps):
196 199 caps = super(statichttprepository, self)._restrictcapabilities(caps)
197 200 return caps.difference(["pushkey"])
198 201
199 202 def url(self):
200 203 return self._url
201 204
202 205 def local(self):
203 206 return False
204 207
205 208 def peer(self):
206 209 return statichttppeer(self)
207 210
208 211 def wlock(self, wait=True):
209 212 raise error.LockUnavailable(0, _('lock not available'), 'lock',
210 213 _('cannot lock static-http repository'))
211 214
212 215 def lock(self, wait=True):
213 216 raise error.Abort(_('cannot lock static-http repository'))
214 217
215 218 def _writecaches(self):
216 219 pass # statichttprepository are read only
217 220
218 221 def instance(ui, path, create, intents=None, createopts=None):
219 222 if create:
220 223 raise error.Abort(_('cannot create new static-http repository'))
221 224 return statichttprepository(ui, path[7:])
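# Example invocation (illustrative host name): static-http repositories are
# addressed with the 'static-http://' scheme; path[7:] above strips the
# leading 'static-' so the remainder is a plain http URL.
#
#   hg clone static-http://example.com/repos/project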