localrepo.clone: add a way to override server preferuncompressed...
Siddharth Agarwal
r23546:deabbe7e default
@@ -1,1823 +1,1823 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered
    version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2Y' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                           if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.sopener.options['maxchainlen'] = maxchainlen

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

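    # Illustrative sketch: repoview filters hide sets of revisions behind the
    # same repository API, so callers pick a view instead of filtering by
    # hand. A typical (hypothetical) use:
    #
    #   served = repo.filtered('served')    # what a server should expose
    #   visible = repo.filtered('visible')  # hides obsolete changesets
    #   assert visible.unfiltered() is repo.unfiltered()
    #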
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.sopener, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

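    # Illustrative sketch: revs() returns revision numbers while set() yields
    # changectx objects; both run the arguments through revset.formatspec for
    # printf-style substitution. A hypothetical use:
    #
    #   revs = repo.revs('ancestors(%s) and not public()', 'tip')
    #   for ctx in repo.set('branch(%s)', 'default'):
    #       repo.ui.write(ctx.hex() + '\n')
    #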
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

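    # Illustrative sketch: a local tag only writes .hg/localtags, while a
    # global tag commits a changeset updating .hgtags. A hypothetical call:
    #
    #   repo.tag(['v1.0'], repo['tip'].node(), 'Added tag v1.0', False,
    #            None, None)
    #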
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}  # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def join(self, f, *insidef):
        return os.path.join(self.path, f, *insidef)

    def wjoin(self, f, *insidef):
        return os.path.join(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        vfsmap = {'plain': self.opener} # root of .hg/
        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode)
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        self._transref = weakref.ref(tr)
        return tr

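    # Illustrative sketch: callers hold the store lock, close() the
    # transaction on success, and release() it unconditionally; releasing an
    # unclosed transaction triggers the journal-based abort. Hypothetically:
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('my-operation')
    #       try:
    #           # ... write to the store through tr ...
    #           tr.close()
    #       finally:
    #           tr.release()
    #   finally:
    #       lock.release()
    #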
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.sopener,
                          'plain': self.opener,}
                transaction.rollback(self.sopener, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.opener}
        transaction.rollback(self.sopener, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

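    # Illustrative sketch: when both locks are needed, wlock must be acquired
    # before lock and released after it, the same order rollback() above uses:
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       # ... modify store and working directory ...
    #   finally:
    #       release(lock, wlock)
    #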
1142 1142 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1143 1143 """
1144 1144 commit an individual file as part of a larger transaction
1145 1145 """
1146 1146
1147 1147 fname = fctx.path()
1148 1148 text = fctx.data()
1149 1149 flog = self.file(fname)
1150 1150 fparent1 = manifest1.get(fname, nullid)
1151 1151 fparent2 = manifest2.get(fname, nullid)
1152 1152
1153 1153 meta = {}
1154 1154 copy = fctx.renamed()
1155 1155 if copy and copy[0] != fname:
1156 1156 # Mark the new revision of this file as a copy of another
1157 1157 # file. This copy data will effectively act as a parent
1158 1158 # of this new revision. If this is a merge, the first
1159 1159 # parent will be the nullid (meaning "look up the copy data")
1160 1160 # and the second one will be the other parent. For example:
1161 1161 #
1162 1162 # 0 --- 1 --- 3 rev1 changes file foo
1163 1163 # \ / rev2 renames foo to bar and changes it
1164 1164 # \- 2 -/ rev3 should have bar with all changes and
1165 1165 # should record that bar descends from
1166 1166 # bar in rev2 and foo in rev1
1167 1167 #
1168 1168 # this allows this merge to succeed:
1169 1169 #
1170 1170 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1171 1171 # \ / merging rev3 and rev4 should use bar@rev2
1172 1172 # \- 2 --- 4 as the merge base
1173 1173 #
1174 1174
1175 1175 cfname = copy[0]
1176 1176 crev = manifest1.get(cfname)
1177 1177 newfparent = fparent2
1178 1178
1179 1179 if manifest2: # branch merge
1180 1180 if fparent2 == nullid or crev is None: # copied on remote side
1181 1181 if cfname in manifest2:
1182 1182 crev = manifest2[cfname]
1183 1183 newfparent = fparent1
1184 1184
1185 1185 # find source in nearest ancestor if we've lost track
1186 1186 if not crev:
1187 1187 self.ui.debug(" %s: searching for copy revision for %s\n" %
1188 1188 (fname, cfname))
1189 1189 for ancestor in self[None].ancestors():
1190 1190 if cfname in ancestor:
1191 1191 crev = ancestor[cfname].filenode()
1192 1192 break
1193 1193
1194 1194 if crev:
1195 1195 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1196 1196 meta["copy"] = cfname
1197 1197 meta["copyrev"] = hex(crev)
1198 1198 fparent1, fparent2 = nullid, newfparent
1199 1199 else:
1200 1200 self.ui.warn(_("warning: can't find ancestor for '%s' "
1201 1201 "copied from '%s'!\n") % (fname, cfname))
1202 1202
1203 1203 elif fparent1 == nullid:
1204 1204 fparent1, fparent2 = fparent2, nullid
1205 1205 elif fparent2 != nullid:
1206 1206 # is one parent an ancestor of the other?
1207 1207 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1208 1208 if fparent1 in fparentancestors:
1209 1209 fparent1, fparent2 = fparent2, nullid
1210 1210 elif fparent2 in fparentancestors:
1211 1211 fparent2 = nullid
1212 1212
1213 1213 # is the file changed?
1214 1214 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1215 1215 changelist.append(fname)
1216 1216 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1217 1217 # are just the flags changed during merge?
1218 1218 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1219 1219 changelist.append(fname)
1220 1220
1221 1221 return fparent1
1222 1222
1223 1223 @unfilteredmethod
1224 1224 def commit(self, text="", user=None, date=None, match=None, force=False,
1225 1225 editor=False, extra={}):
1226 1226 """Add a new revision to current repository.
1227 1227
1228 1228 Revision information is gathered from the working directory,
1229 1229 match can be used to filter the committed files. If editor is
1230 1230 supplied, it is called to get a commit message.
1231 1231 """
1232 1232
1233 1233 def fail(f, msg):
1234 1234 raise util.Abort('%s: %s' % (f, msg))
1235 1235
1236 1236 if not match:
1237 1237 match = matchmod.always(self.root, '')
1238 1238
1239 1239 if not force:
1240 1240 vdirs = []
1241 1241 match.explicitdir = vdirs.append
1242 1242 match.bad = fail
1243 1243
1244 1244 wlock = self.wlock()
1245 1245 try:
1246 1246 wctx = self[None]
1247 1247 merge = len(wctx.parents()) > 1
1248 1248
1249 1249 if (not force and merge and match and
1250 1250 (match.files() or match.anypats())):
1251 1251 raise util.Abort(_('cannot partially commit a merge '
1252 1252 '(do not specify files or patterns)'))
1253 1253
1254 1254 status = self.status(match=match, clean=force)
1255 1255 if force:
1256 1256 status.modified.extend(status.clean) # mq may commit clean files
1257 1257
1258 1258 # check subrepos
1259 1259 subs = []
1260 1260 commitsubs = set()
1261 1261 newstate = wctx.substate.copy()
1262 1262 # only manage subrepos and .hgsubstate if .hgsub is present
1263 1263 if '.hgsub' in wctx:
1264 1264 # we'll decide whether to track this ourselves, thanks
1265 1265 for c in status.modified, status.added, status.removed:
1266 1266 if '.hgsubstate' in c:
1267 1267 c.remove('.hgsubstate')
1268 1268
1269 1269 # compare current state to last committed state
1270 1270 # build new substate based on last committed state
1271 1271 oldstate = wctx.p1().substate
1272 1272 for s in sorted(newstate.keys()):
1273 1273 if not match(s):
1274 1274 # ignore working copy, use old state if present
1275 1275 if s in oldstate:
1276 1276 newstate[s] = oldstate[s]
1277 1277 continue
1278 1278 if not force:
1279 1279 raise util.Abort(
1280 1280 _("commit with new subrepo %s excluded") % s)
1281 1281 if wctx.sub(s).dirty(True):
1282 1282 if not self.ui.configbool('ui', 'commitsubrepos'):
1283 1283 raise util.Abort(
1284 1284 _("uncommitted changes in subrepo %s") % s,
1285 1285 hint=_("use --subrepos for recursive commit"))
1286 1286 subs.append(s)
1287 1287 commitsubs.add(s)
1288 1288 else:
1289 1289 bs = wctx.sub(s).basestate()
1290 1290 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1291 1291 if oldstate.get(s, (None, None, None))[1] != bs:
1292 1292 subs.append(s)
1293 1293
1294 1294 # check for removed subrepos
1295 1295 for p in wctx.parents():
1296 1296 r = [s for s in p.substate if s not in newstate]
1297 1297 subs += [s for s in r if match(s)]
1298 1298 if subs:
1299 1299 if (not match('.hgsub') and
1300 1300 '.hgsub' in (wctx.modified() + wctx.added())):
1301 1301 raise util.Abort(
1302 1302 _("can't commit subrepos without .hgsub"))
1303 1303 status.modified.insert(0, '.hgsubstate')
1304 1304
1305 1305 elif '.hgsub' in status.removed:
1306 1306 # clean up .hgsubstate when .hgsub is removed
1307 1307 if ('.hgsubstate' in wctx and
1308 1308 '.hgsubstate' not in (status.modified + status.added +
1309 1309 status.removed)):
1310 1310 status.removed.insert(0, '.hgsubstate')
1311 1311
1312 1312 # make sure all explicit patterns are matched
1313 1313 if not force and match.files():
1314 1314 matched = set(status.modified + status.added + status.removed)
1315 1315
1316 1316 for f in match.files():
1317 1317 f = self.dirstate.normalize(f)
1318 1318 if f == '.' or f in matched or f in wctx.substate:
1319 1319 continue
1320 1320 if f in status.deleted:
1321 1321 fail(f, _('file not found!'))
1322 1322 if f in vdirs: # visited directory
1323 1323 d = f + '/'
1324 1324 for mf in matched:
1325 1325 if mf.startswith(d):
1326 1326 break
1327 1327 else:
1328 1328 fail(f, _("no match under directory!"))
1329 1329 elif f not in self.dirstate:
1330 1330 fail(f, _("file not tracked!"))
1331 1331
1332 1332 cctx = context.workingctx(self, text, user, date, extra, status)
1333 1333
1334 1334 if (not force and not extra.get("close") and not merge
1335 1335 and not cctx.files()
1336 1336 and wctx.branch() == wctx.p1().branch()):
1337 1337 return None
1338 1338
1339 1339 if merge and cctx.deleted():
1340 1340 raise util.Abort(_("cannot commit merge with missing files"))
1341 1341
1342 1342 ms = mergemod.mergestate(self)
1343 1343 for f in status.modified:
1344 1344 if f in ms and ms[f] == 'u':
1345 1345 raise util.Abort(_("unresolved merge conflicts "
1346 1346 "(see hg help resolve)"))
1347 1347
1348 1348 if editor:
1349 1349 cctx._text = editor(self, cctx, subs)
1350 1350 edited = (text != cctx._text)
1351 1351
1352 1352 # Save commit message in case this transaction gets rolled back
1353 1353 # (e.g. by a pretxncommit hook). Leave the content alone on
1354 1354 # the assumption that the user will use the same editor again.
1355 1355 msgfn = self.savecommitmessage(cctx._text)
1356 1356
1357 1357 # commit subs and write new state
1358 1358 if subs:
1359 1359 for s in sorted(commitsubs):
1360 1360 sub = wctx.sub(s)
1361 1361 self.ui.status(_('committing subrepository %s\n') %
1362 1362 subrepo.subrelpath(sub))
1363 1363 sr = sub.commit(cctx._text, user, date)
1364 1364 newstate[s] = (newstate[s][0], sr)
1365 1365 subrepo.writestate(self, newstate)
1366 1366
1367 1367 p1, p2 = self.dirstate.parents()
1368 1368 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1369 1369 try:
1370 1370 self.hook("precommit", throw=True, parent1=hookp1,
1371 1371 parent2=hookp2)
1372 1372 ret = self.commitctx(cctx, True)
1373 1373 except: # re-raises
1374 1374 if edited:
1375 1375 self.ui.write(
1376 1376 _('note: commit message saved in %s\n') % msgfn)
1377 1377 raise
1378 1378
1379 1379 # update bookmarks, dirstate and mergestate
1380 1380 bookmarks.update(self, [p1, p2], ret)
1381 1381 cctx.markcommitted(ret)
1382 1382 ms.reset()
1383 1383 finally:
1384 1384 wlock.release()
1385 1385
1386 1386 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1387 1387 # hack for command that use a temporary commit (eg: histedit)
1388 1388 # temporary commit got stripped before hook release
1389 1389 if node in self:
1390 1390 self.hook("commit", node=node, parent1=parent1,
1391 1391 parent2=parent2)
1392 1392 self._afterlock(commithook)
1393 1393 return ret
1394 1394
1395 1395 @unfilteredmethod
1396 1396 def commitctx(self, ctx, error=False):
1397 1397 """Add a new revision to current repository.
1398 1398 Revision information is passed via the context argument.
1399 1399 """
1400 1400
1401 1401 tr = None
1402 1402 p1, p2 = ctx.p1(), ctx.p2()
1403 1403 user = ctx.user()
1404 1404
1405 1405 lock = self.lock()
1406 1406 try:
1407 1407 tr = self.transaction("commit")
1408 1408 trp = weakref.proxy(tr)
1409 1409
1410 1410 if ctx.files():
1411 1411 m1 = p1.manifest()
1412 1412 m2 = p2.manifest()
1413 1413 m = m1.copy()
1414 1414
1415 1415 # check in files
1416 1416 added = []
1417 1417 changed = []
1418 1418 removed = list(ctx.removed())
1419 1419 linkrev = len(self)
1420 1420 for f in sorted(ctx.modified() + ctx.added()):
1421 1421 self.ui.note(f + "\n")
1422 1422 try:
1423 1423 fctx = ctx[f]
1424 1424 if fctx is None:
1425 1425 removed.append(f)
1426 1426 else:
1427 1427 added.append(f)
1428 1428 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1429 1429 trp, changed)
1430 1430 m.setflag(f, fctx.flags())
1431 1431 except OSError, inst:
1432 1432 self.ui.warn(_("trouble committing %s!\n") % f)
1433 1433 raise
1434 1434 except IOError, inst:
1435 1435 errcode = getattr(inst, 'errno', errno.ENOENT)
1436 1436 if error or errcode and errcode != errno.ENOENT:
1437 1437 self.ui.warn(_("trouble committing %s!\n") % f)
1438 1438 raise
1439 1439
1440 1440 # update manifest
1441 1441 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1442 1442 drop = [f for f in removed if f in m]
1443 1443 for f in drop:
1444 1444 del m[f]
1445 1445 mn = self.manifest.add(m, trp, linkrev,
1446 1446 p1.manifestnode(), p2.manifestnode(),
1447 1447 added, drop)
1448 1448 files = changed + removed
1449 1449 else:
1450 1450 mn = p1.manifestnode()
1451 1451 files = []
1452 1452
1453 1453 # update changelog
1454 1454 self.changelog.delayupdate(tr)
1455 1455 n = self.changelog.add(mn, files, ctx.description(),
1456 1456 trp, p1.node(), p2.node(),
1457 1457 user, ctx.date(), ctx.extra().copy())
1458 1458 p = lambda: tr.writepending() and self.root or ""
1459 1459 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1460 1460 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1461 1461 parent2=xp2, pending=p)
1462 1462 # set the new commit is proper phase
1463 1463 targetphase = subrepo.newcommitphase(self.ui, ctx)
1464 1464 if targetphase:
1465 1465 # retract boundary do not alter parent changeset.
1466 1466 # if a parent have higher the resulting phase will
1467 1467 # be compliant anyway
1468 1468 #
1469 1469 # if minimal phase was 0 we don't need to retract anything
1470 1470 phases.retractboundary(self, tr, targetphase, [n])
1471 1471 tr.close()
1472 1472 branchmap.updatecache(self.filtered('served'))
1473 1473 return n
1474 1474 finally:
1475 1475 if tr:
1476 1476 tr.release()
1477 1477 lock.release()
1478 1478
1479 1479 @unfilteredmethod
1480 1480 def destroying(self):
1481 1481 '''Inform the repository that nodes are about to be destroyed.
1482 1482 Intended for use by strip and rollback, so there's a common
1483 1483 place for anything that has to be done before destroying history.
1484 1484
1485 1485 This is mostly useful for saving state that is in memory and waiting
1486 1486 to be flushed when the current lock is released. Because a call to
1487 1487 destroyed is imminent, the repo will be invalidated causing those
1488 1488 changes to stay in memory (waiting for the next unlock), or vanish
1489 1489 completely.
1490 1490 '''
1491 1491 # When using the same lock to commit and strip, the phasecache is left
1492 1492 # dirty after committing. Then when we strip, the repo is invalidated,
1493 1493 # causing those changes to disappear.
1494 1494 if '_phasecache' in vars(self):
1495 1495 self._phasecache.write()
1496 1496
1497 1497 @unfilteredmethod
1498 1498 def destroyed(self):
1499 1499 '''Inform the repository that nodes have been destroyed.
1500 1500 Intended for use by strip and rollback, so there's a common
1501 1501 place for anything that has to be done after destroying history.
1502 1502 '''
1503 1503 # When one tries to:
1504 1504 # 1) destroy nodes thus calling this method (e.g. strip)
1505 1505 # 2) use phasecache somewhere (e.g. commit)
1506 1506 #
1507 1507 # then 2) will fail because the phasecache contains nodes that were
1508 1508 # removed. We can either remove phasecache from the filecache,
1509 1509 # causing it to reload next time it is accessed, or simply filter
1510 1510 # the removed nodes now and write the updated cache.
1511 1511 self._phasecache.filterunknown(self)
1512 1512 self._phasecache.write()
1513 1513
1514 1514 # update the 'served' branch cache to help read only server process
1515 1515 # Thanks to branchcache collaboration this is done from the nearest
1516 1516 # filtered subset and it is expected to be fast.
1517 1517 branchmap.updatecache(self.filtered('served'))
1518 1518
1519 1519 # Ensure the persistent tag cache is updated. Doing it now
1520 1520 # means that the tag cache only has to worry about destroyed
1521 1521 # heads immediately after a strip/rollback. That in turn
1522 1522 # guarantees that "cachetip == currenttip" (comparing both rev
1523 1523 # and node) always means no nodes have been added or destroyed.
1524 1524
1525 1525 # XXX this is suboptimal when qrefresh'ing: we strip the current
1526 1526 # head, refresh the tag cache, then immediately add a new head.
1527 1527 # But I think doing it this way is necessary for the "instant
1528 1528 # tag cache retrieval" case to work.
1529 1529 self.invalidate()
1530 1530
1531 1531 def walk(self, match, node=None):
1532 1532 '''
1533 1533 walk recursively through the directory tree or a given
1534 1534 changeset, finding all files matched by the match
1535 1535 function
1536 1536 '''
1537 1537 return self[node].walk(match)
1538 1538
1539 1539 def status(self, node1='.', node2=None, match=None,
1540 1540 ignored=False, clean=False, unknown=False,
1541 1541 listsubrepos=False):
1542 1542 '''a convenience method that calls node1.status(node2)'''
1543 1543 return self[node1].status(node2, match, ignored, clean, unknown,
1544 1544 listsubrepos)
1545 1545
1546 1546 def heads(self, start=None):
1547 1547 heads = self.changelog.heads(start)
1548 1548 # sort the output in rev descending order
1549 1549 return sorted(heads, key=self.changelog.rev, reverse=True)
1550 1550
1551 1551 def branchheads(self, branch=None, start=None, closed=False):
1552 1552 '''return a (possibly filtered) list of heads for the given branch
1553 1553
1554 1554 Heads are returned in topological order, from newest to oldest.
1555 1555 If branch is None, use the dirstate branch.
1556 1556 If start is not None, return only heads reachable from start.
1557 1557 If closed is True, return heads that are marked as closed as well.
1558 1558 '''
1559 1559 if branch is None:
1560 1560 branch = self[None].branch()
1561 1561 branches = self.branchmap()
1562 1562 if branch not in branches:
1563 1563 return []
1564 1564 # the cache returns heads ordered lowest to highest
1565 1565 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1566 1566 if start is not None:
1567 1567 # filter out the heads that cannot be reached from startrev
1568 1568 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1569 1569 bheads = [h for h in bheads if h in fbheads]
1570 1570 return bheads
1571 1571
1572 1572 def branches(self, nodes):
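        # for each starting node, follow first parents down the run of
        # single-parent changesets; stop at a merge or at the root and
        # record (start, stop, parent1, parent2) for that linear segment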
1573 1573 if not nodes:
1574 1574 nodes = [self.changelog.tip()]
1575 1575 b = []
1576 1576 for n in nodes:
1577 1577 t = n
1578 1578 while True:
1579 1579 p = self.changelog.parents(n)
1580 1580 if p[1] != nullid or p[0] == nullid:
1581 1581 b.append((t, n, p[0], p[1]))
1582 1582 break
1583 1583 n = p[0]
1584 1584 return b
1585 1585
1586 1586 def between(self, pairs):
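        # for each (top, bottom) pair, walk first parents from top towards
        # bottom, collecting nodes at exponentially growing distances from
        # top (1, 2, 4, ...) so that long chains yield only a small sample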
1587 1587 r = []
1588 1588
1589 1589 for top, bottom in pairs:
1590 1590 n, l, i = top, [], 0
1591 1591 f = 1
1592 1592
1593 1593 while n != bottom and n != nullid:
1594 1594 p = self.changelog.parents(n)[0]
1595 1595 if i == f:
1596 1596 l.append(n)
1597 1597 f = f * 2
1598 1598 n = p
1599 1599 i += 1
1600 1600
1601 1601 r.append(l)
1602 1602
1603 1603 return r
1604 1604
1605 1605 def checkpush(self, pushop):
1606 1606 """Extensions can override this function if additional checks have
1607 1607         to be performed before pushing, or call it if they override the
1608 1608         push command.
1609 1609 """
1610 1610 pass
1611 1611
1612 1612 @unfilteredpropertycache
1613 1613 def prepushoutgoinghooks(self):
1614 1614         """Return a util.hooks object consisting of "(repo, remote, outgoing)"
1615 1615         functions, which are called before pushing changesets.
1616 1616 """
1617 1617 return util.hooks()
1618 1618
1619 1619 def stream_in(self, remote, requirements):
1620 1620 lock = self.lock()
1621 1621 try:
1622 1622 # Save remote branchmap. We will use it later
1623 1623 # to speed up branchcache creation
1624 1624 rbranchmap = None
1625 1625 if remote.capable("branchmap"):
1626 1626 rbranchmap = remote.branchmap()
1627 1627
1628 1628 fp = remote.stream_out()
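            # wire format of the stream: a status-code line, then a line
            # '<file count> <total bytes>', then for each file a header
            # line '<name>\0<size>' followed by <size> bytes of file data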
1629 1629 l = fp.readline()
1630 1630 try:
1631 1631 resp = int(l)
1632 1632 except ValueError:
1633 1633 raise error.ResponseError(
1634 1634 _('unexpected response from remote server:'), l)
1635 1635 if resp == 1:
1636 1636 raise util.Abort(_('operation forbidden by server'))
1637 1637 elif resp == 2:
1638 1638 raise util.Abort(_('locking the remote repository failed'))
1639 1639 elif resp != 0:
1640 1640 raise util.Abort(_('the server sent an unknown error code'))
1641 1641 self.ui.status(_('streaming all changes\n'))
1642 1642 l = fp.readline()
1643 1643 try:
1644 1644 total_files, total_bytes = map(int, l.split(' ', 1))
1645 1645 except (ValueError, TypeError):
1646 1646 raise error.ResponseError(
1647 1647 _('unexpected response from remote server:'), l)
1648 1648 self.ui.status(_('%d files to transfer, %s of data\n') %
1649 1649 (total_files, util.bytecount(total_bytes)))
1650 1650 handled_bytes = 0
1651 1651 self.ui.progress(_('clone'), 0, total=total_bytes)
1652 1652 start = time.time()
1653 1653
1654 1654 tr = self.transaction(_('clone'))
1655 1655 try:
1656 1656 for i in xrange(total_files):
1657 1657 # XXX doesn't support '\n' or '\r' in filenames
1658 1658 l = fp.readline()
1659 1659 try:
1660 1660 name, size = l.split('\0', 1)
1661 1661 size = int(size)
1662 1662 except (ValueError, TypeError):
1663 1663 raise error.ResponseError(
1664 1664 _('unexpected response from remote server:'), l)
1665 1665 if self.ui.debugflag:
1666 1666 self.ui.debug('adding %s (%s)\n' %
1667 1667 (name, util.bytecount(size)))
1668 1668 # for backwards compat, name was partially encoded
1669 1669 ofp = self.sopener(store.decodedir(name), 'w')
1670 1670 for chunk in util.filechunkiter(fp, limit=size):
1671 1671 handled_bytes += len(chunk)
1672 1672 self.ui.progress(_('clone'), handled_bytes,
1673 1673 total=total_bytes)
1674 1674 ofp.write(chunk)
1675 1675 ofp.close()
1676 1676 tr.close()
1677 1677 finally:
1678 1678 tr.release()
1679 1679
1680 1680             # Writing straight to files circumvented the in-memory caches
1681 1681 self.invalidate()
1682 1682
1683 1683 elapsed = time.time() - start
1684 1684 if elapsed <= 0:
1685 1685 elapsed = 0.001
1686 1686 self.ui.progress(_('clone'), None)
1687 1687 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1688 1688 (util.bytecount(total_bytes), elapsed,
1689 1689 util.bytecount(total_bytes / elapsed)))
1690 1690
1691 1691             # new requirements = old non-format requirements +
1692 1692             #                    new format-related requirements
1693 1693             #                    from the streamed-in repository
1694 1694 requirements.update(set(self.requirements) - self.supportedformats)
1695 1695 self._applyrequirements(requirements)
1696 1696 self._writerequirements()
1697 1697
1698 1698 if rbranchmap:
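                # seed the local branch cache with the branchmap fetched
                # before the stream so it does not have to be recomputed
                # from scratch on first use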
1699 1699 rbheads = []
1700 1700 closed = []
1701 1701 for bheads in rbranchmap.itervalues():
1702 1702 rbheads.extend(bheads)
1703 1703 for h in bheads:
1704 1704 r = self.changelog.rev(h)
1705 1705 b, c = self.changelog.branchinfo(r)
1706 1706 if c:
1707 1707 closed.append(h)
1708 1708
1709 1709 if rbheads:
1710 1710 rtiprev = max((int(self.changelog.rev(node))
1711 1711 for node in rbheads))
1712 1712 cache = branchmap.branchcache(rbranchmap,
1713 1713 self[rtiprev].node(),
1714 1714 rtiprev,
1715 1715 closednodes=closed)
1716 1716                 # Try to stick it as low as possible
1717 1717                 # filters above 'served' are unlikely to be fetched from a clone
1718 1718 for candidate in ('base', 'immutable', 'served'):
1719 1719 rview = self.filtered(candidate)
1720 1720 if cache.validfor(rview):
1721 1721 self._branchcaches[candidate] = cache
1722 1722 cache.write(rview)
1723 1723 break
1724 1724 self.invalidate()
1725 1725 return len(self.heads()) + 1
1726 1726 finally:
1727 1727 lock.release()
1728 1728
1729 def clone(self, remote, heads=[], stream=False):
1729 def clone(self, remote, heads=[], stream=None):
1730 1730 '''clone remote repository.
1731 1731
1732 1732 keyword arguments:
1733 1733 heads: list of revs to clone (forces use of pull)
1734 1734         stream: use streaming clone if possible (None means ask the server)'''
1735 1735
1736 1736 # now, all clients that can request uncompressed clones can
1737 1737 # read repo formats supported by all servers that can serve
1738 1738 # them.
1739 1739
1740 1740 # if revlog format changes, client will have to check version
1741 1741 # and format flags on "stream" capability, and use
1742 1742 # uncompressed only if compatible.
1743 1743
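        # 'stream' is tri-state: None (the default) means consult the
        # server's stream-preferred capability below, True requests a
        # streaming clone, and False (e.g. 'hg clone --pull') forces a
        # regular pull even when the server prefers streaming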
1744 if not stream:
1744 if stream is None:
1745 1745 # if the server explicitly prefers to stream (for fast LANs)
1746 1746 stream = remote.capable('stream-preferred')
1747 1747
1748 1748 if stream and not heads:
1749 1749 # 'stream' means remote revlog format is revlogv1 only
1750 1750 if remote.capable('stream'):
1751 1751 self.stream_in(remote, set(('revlogv1',)))
1752 1752 else:
1753 1753 # otherwise, 'streamreqs' contains the remote revlog format
1754 1754 streamreqs = remote.capable('streamreqs')
1755 1755 if streamreqs:
1756 1756 streamreqs = set(streamreqs.split(','))
1757 1757 # if we support it, stream in and adjust our requirements
1758 1758 if not streamreqs - self.supportedformats:
1759 1759 self.stream_in(remote, streamreqs)
1760 1760
1761 1761 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1762 1762 try:
1763 1763 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1764 1764 ret = exchange.pull(self, remote, heads).cgresult
1765 1765 finally:
1766 1766 self.ui.restoreconfig(quiet)
1767 1767 return ret
1768 1768
1769 1769 def pushkey(self, namespace, key, old, new):
1770 1770 try:
1771 1771 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1772 1772 old=old, new=new)
1773 1773 except error.HookAbort, exc:
1774 1774 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1775 1775 if exc.hint:
1776 1776 self.ui.write_err(_("(%s)\n") % exc.hint)
1777 1777 return False
1778 1778 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1779 1779 ret = pushkey.push(self, namespace, key, old, new)
1780 1780 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1781 1781 ret=ret)
1782 1782 return ret
1783 1783
1784 1784 def listkeys(self, namespace):
1785 1785 self.hook('prelistkeys', throw=True, namespace=namespace)
1786 1786 self.ui.debug('listing keys for "%s"\n' % namespace)
1787 1787 values = pushkey.list(self, namespace)
1788 1788 self.hook('listkeys', namespace=namespace, values=values)
1789 1789 return values
1790 1790
1791 1791 def debugwireargs(self, one, two, three=None, four=None, five=None):
1792 1792 '''used to test argument passing over the wire'''
1793 1793 return "%s %s %s %s %s" % (one, two, three, four, five)
1794 1794
1795 1795 def savecommitmessage(self, text):
1796 1796 fp = self.opener('last-message.txt', 'wb')
1797 1797 try:
1798 1798 fp.write(text)
1799 1799 finally:
1800 1800 fp.close()
1801 1801 return self.pathto(fp.name[len(self.root) + 1:])
1802 1802
1803 1803 # used to avoid circular references so destructors work
1804 1804 def aftertrans(files):
1805 1805 renamefiles = [tuple(t) for t in files]
1806 1806 def a():
1807 1807 for vfs, src, dest in renamefiles:
1808 1808 try:
1809 1809 vfs.rename(src, dest)
1810 1810 except OSError: # journal file does not yet exist
1811 1811 pass
1812 1812 return a
1813 1813
1814 1814 def undoname(fn):
1815 1815 base, name = os.path.split(fn)
1816 1816 assert name.startswith('journal')
1817 1817 return os.path.join(base, name.replace('journal', 'undo', 1))
1818 1818
1819 1819 def instance(ui, path, create):
1820 1820 return localrepository(ui, util.urllocalpath(path), create)
1821 1821
1822 1822 def islocal(path):
1823 1823 return True
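
With the change above, the stream argument to clone is tri-state. A minimal
sketch of the three call patterns (repo as a localrepository and remote as a
peer are assumed to be obtained as elsewhere in this module):

    repo.clone(remote)                # stream=None: honor the server's
                                      #   stream-preferred capability
    repo.clone(remote, stream=True)   # always attempt a streaming clone
    repo.clone(remote, stream=False)  # force a pull; this is how
                                      #   'hg clone --pull' overrides the
                                      #   server's preferuncompressed setting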
@@ -1,313 +1,328 b''
1 1 #require serve
2 2
3 3 $ hg init test
4 4 $ cd test
5 5 $ echo foo>foo
6 6 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
7 7 $ echo foo>foo.d/foo
8 8 $ echo bar>foo.d/bAr.hg.d/BaR
9 9 $ echo bar>foo.d/baR.d.hg/bAR
10 10 $ hg commit -A -m 1
11 11 adding foo
12 12 adding foo.d/bAr.hg.d/BaR
13 13 adding foo.d/baR.d.hg/bAR
14 14 adding foo.d/foo
15 15 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
16 16 $ hg --config server.uncompressed=False serve -p $HGPORT1 -d --pid-file=../hg2.pid
17 17
18 18 Test server address cannot be reused
19 19
20 20 #if windows
21 21 $ hg serve -p $HGPORT1 2>&1
22 22 abort: cannot start server at ':$HGPORT1': * (glob)
23 23 [255]
24 24 #else
25 25 $ hg serve -p $HGPORT1 2>&1
26 26 abort: cannot start server at ':$HGPORT1': Address already in use
27 27 [255]
28 28 #endif
29 29 $ cd ..
30 30 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
31 31
32 32 clone via stream
33 33
34 34 $ hg clone --uncompressed http://localhost:$HGPORT/ copy 2>&1
35 35 streaming all changes
36 36 6 files to transfer, 606 bytes of data
37 37 transferred * bytes in * seconds (*/sec) (glob)
38 38 searching for changes
39 39 no changes found
40 40 updating to branch default
41 41 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 42 $ hg verify -R copy
43 43 checking changesets
44 44 checking manifests
45 45 crosschecking files in changesets and manifests
46 46 checking files
47 47 4 files, 1 changesets, 4 total revisions
48 48
49 49 try to clone via stream, should use pull instead
50 50
51 51 $ hg clone --uncompressed http://localhost:$HGPORT1/ copy2
52 52 requesting all changes
53 53 adding changesets
54 54 adding manifests
55 55 adding file changes
56 56 added 1 changesets with 4 changes to 4 files
57 57 updating to branch default
58 58 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
59 59
60 60 clone via pull
61 61
62 62 $ hg clone http://localhost:$HGPORT1/ copy-pull
63 63 requesting all changes
64 64 adding changesets
65 65 adding manifests
66 66 adding file changes
67 67 added 1 changesets with 4 changes to 4 files
68 68 updating to branch default
69 69 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
70 70 $ hg verify -R copy-pull
71 71 checking changesets
72 72 checking manifests
73 73 crosschecking files in changesets and manifests
74 74 checking files
75 75 4 files, 1 changesets, 4 total revisions
76 76 $ cd test
77 77 $ echo bar > bar
78 78 $ hg commit -A -d '1 0' -m 2
79 79 adding bar
80 80 $ cd ..
81 81
82 82 clone over http with --update
83 83
84 84 $ hg clone http://localhost:$HGPORT1/ updated --update 0
85 85 requesting all changes
86 86 adding changesets
87 87 adding manifests
88 88 adding file changes
89 89 added 2 changesets with 5 changes to 5 files
90 90 updating to branch default
91 91 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
92 92 $ hg log -r . -R updated
93 93 changeset: 0:8b6053c928fe
94 94 user: test
95 95 date: Thu Jan 01 00:00:00 1970 +0000
96 96 summary: 1
97 97
98 98 $ rm -rf updated
99 99
100 100 incoming via HTTP
101 101
102 102 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
103 103 adding changesets
104 104 adding manifests
105 105 adding file changes
106 106 added 1 changesets with 4 changes to 4 files
107 107 updating to branch default
108 108 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
109 109 $ cd partial
110 110 $ touch LOCAL
111 111 $ hg ci -qAm LOCAL
112 112 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
113 113 comparing with http://localhost:$HGPORT1/
114 114 searching for changes
115 115 2
116 116 $ cd ..
117 117
118 118 pull
119 119
120 120 $ cd copy-pull
121 121 $ echo '[hooks]' >> .hg/hgrc
122 122 $ echo "changegroup = python \"$TESTDIR/printenv.py\" changegroup" >> .hg/hgrc
123 123 $ hg pull
124 124 pulling from http://localhost:$HGPORT1/
125 125 searching for changes
126 126 adding changesets
127 127 adding manifests
128 128 adding file changes
129 129 added 1 changesets with 1 changes to 1 files
130 130 changegroup hook: HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_SOURCE=pull HG_URL=http://localhost:$HGPORT1/
131 131 (run 'hg update' to get a working copy)
132 132 $ cd ..
133 133
134 134 clone from invalid URL
135 135
136 136 $ hg clone http://localhost:$HGPORT/bad
137 137 abort: HTTP Error 404: Not Found
138 138 [255]
139 139
140 140 test http authentication
141 141 + use the same server to test server-side streaming preference
142 142
143 143 $ cd test
144 144 $ cat << EOT > userpass.py
145 145 > import base64
146 146 > from mercurial.hgweb import common
147 147 > def perform_authentication(hgweb, req, op):
148 148 > auth = req.env.get('HTTP_AUTHORIZATION')
149 149 > if not auth:
150 150 > raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, 'who',
151 151 > [('WWW-Authenticate', 'Basic Realm="mercurial"')])
152 152 > if base64.b64decode(auth.split()[1]).split(':', 1) != ['user', 'pass']:
153 153 > raise common.ErrorResponse(common.HTTP_FORBIDDEN, 'no')
154 154 > def extsetup():
155 155 > common.permhooks.insert(0, perform_authentication)
156 156 > EOT
157 157 $ hg --config extensions.x=userpass.py serve -p $HGPORT2 -d --pid-file=pid \
158 158 > --config server.preferuncompressed=True \
159 159 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
160 160 $ cat pid >> $DAEMON_PIDS
161 161
162 162 $ cat << EOF > get_pass.py
163 163 > import getpass
164 164 > def newgetpass(arg):
165 165 > return "pass"
166 166 > getpass.getpass = newgetpass
167 167 > EOF
168 168
169 169 #if python243
170 170 $ hg id http://localhost:$HGPORT2/
171 171 abort: http authorization required for http://localhost:$HGPORT2/
172 172 [255]
173 173 $ hg id http://localhost:$HGPORT2/
174 174 abort: http authorization required for http://localhost:$HGPORT2/
175 175 [255]
176 176 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
177 177 http authorization required for http://localhost:$HGPORT2/
178 178 realm: mercurial
179 179 user: user
180 180 password: 5fed3813f7f5
181 181 $ hg id http://user:pass@localhost:$HGPORT2/
182 182 5fed3813f7f5
183 183 #endif
184 184 $ echo '[auth]' >> .hg/hgrc
185 185 $ echo 'l.schemes=http' >> .hg/hgrc
186 186 $ echo 'l.prefix=lo' >> .hg/hgrc
187 187 $ echo 'l.username=user' >> .hg/hgrc
188 188 $ echo 'l.password=pass' >> .hg/hgrc
189 189 $ hg id http://localhost:$HGPORT2/
190 190 5fed3813f7f5
191 191 $ hg id http://localhost:$HGPORT2/
192 192 5fed3813f7f5
193 193 $ hg id http://user@localhost:$HGPORT2/
194 194 5fed3813f7f5
195 195 #if python243
196 196 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
197 197 streaming all changes
198 198 7 files to transfer, 916 bytes of data
199 199 transferred * bytes in * seconds (*/sec) (glob)
200 200 searching for changes
201 201 no changes found
202 202 updating to branch default
203 203 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
204 --pull should override server's preferuncompressed
205 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
206 requesting all changes
207 adding changesets
208 adding manifests
209 adding file changes
210 added 2 changesets with 5 changes to 5 files
211 updating to branch default
212 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
204 213
205 214 $ hg id http://user2@localhost:$HGPORT2/
206 215 abort: http authorization required for http://localhost:$HGPORT2/
207 216 [255]
208 217 $ hg id http://user:pass2@localhost:$HGPORT2/
209 218 abort: HTTP Error 403: no
210 219 [255]
211 220
212 221 $ hg -R dest tag -r tip top
213 222 $ hg -R dest push http://user:pass@localhost:$HGPORT2/
214 223 pushing to http://user:***@localhost:$HGPORT2/
215 224 searching for changes
216 225 remote: adding changesets
217 226 remote: adding manifests
218 227 remote: adding file changes
219 228 remote: added 1 changesets with 1 changes to 1 files
220 229 $ hg rollback -q
221 230
222 231 $ cut -c38- ../access.log
223 232 "GET /?cmd=capabilities HTTP/1.1" 200 -
224 233 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
225 234 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
226 235 "GET /?cmd=capabilities HTTP/1.1" 200 -
227 236 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
228 237 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
229 238 "GET /?cmd=capabilities HTTP/1.1" 200 -
230 239 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
231 240 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
232 241 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
233 242 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
234 243 "GET /?cmd=capabilities HTTP/1.1" 200 -
235 244 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
236 245 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
237 246 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
238 247 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
239 248 "GET /?cmd=capabilities HTTP/1.1" 200 -
240 249 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
241 250 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
242 251 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
243 252 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
244 253 "GET /?cmd=capabilities HTTP/1.1" 200 -
245 254 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
246 255 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
247 256 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
248 257 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
249 258 "GET /?cmd=capabilities HTTP/1.1" 200 -
250 259 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
251 260 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
252 261 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
253 262 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
254 263 "GET /?cmd=capabilities HTTP/1.1" 200 -
255 264 "GET /?cmd=branchmap HTTP/1.1" 200 -
256 265 "GET /?cmd=stream_out HTTP/1.1" 401 -
257 266 "GET /?cmd=stream_out HTTP/1.1" 200 -
258 267 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
259 268 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d
260 269 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
261 270 "GET /?cmd=capabilities HTTP/1.1" 200 -
271 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks
272 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
273 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D
274 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
275 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
276 "GET /?cmd=capabilities HTTP/1.1" 200 -
262 277 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
263 278 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
264 279 "GET /?cmd=capabilities HTTP/1.1" 200 -
265 280 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
266 281 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
267 282 "GET /?cmd=listkeys HTTP/1.1" 403 - x-hgarg-1:namespace=namespaces
268 283 "GET /?cmd=capabilities HTTP/1.1" 200 -
269 284 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872
270 285 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=phases
271 286 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
272 287 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
273 288 "GET /?cmd=branchmap HTTP/1.1" 200 -
274 289 "GET /?cmd=branchmap HTTP/1.1" 200 -
275 290 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
276 291 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524
277 292 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
278 293
279 294 #endif
280 295 $ cd ..
281 296
282 297 clone of serve with repo in root and unserved subrepo (issue2970)
283 298
284 299 $ hg --cwd test init sub
285 300 $ echo empty > test/sub/empty
286 301 $ hg --cwd test/sub add empty
287 302 $ hg --cwd test/sub commit -qm 'add empty'
288 303 $ hg --cwd test/sub tag -r 0 something
289 304 $ echo sub = sub > test/.hgsub
290 305 $ hg --cwd test add .hgsub
291 306 $ hg --cwd test commit -qm 'add subrepo'
292 307 $ hg clone http://localhost:$HGPORT noslash-clone
293 308 requesting all changes
294 309 adding changesets
295 310 adding manifests
296 311 adding file changes
297 312 added 3 changesets with 7 changes to 7 files
298 313 updating to branch default
299 314 abort: HTTP Error 404: Not Found
300 315 [255]
301 316 $ hg clone http://localhost:$HGPORT/ slash-clone
302 317 requesting all changes
303 318 adding changesets
304 319 adding manifests
305 320 adding file changes
306 321 added 3 changesets with 7 changes to 7 files
307 322 updating to branch default
308 323 abort: HTTP Error 404: Not Found
309 324 [255]
310 325
311 326 check error log
312 327
313 328 $ cat error.log