##// END OF EJS Templates
afterlock: add the callback to the top level lock (issue4608)...
Pierre-Yves David -
r24821:57f1dbc9 stable
parent child Browse files
Show More
@@ -1,1955 +1,1958 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    # All three descriptor operations are redirected to the unfiltered repo,
    # so a filtered view and its underlying repository share one cache entry.
    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # store files live under .hg/store: resolve through sjoin, not join
        return obj.sjoin(fname)
40 40
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # already unfiltered: compute and cache as a normal propertycache
            return super(unfilteredpropertycache, self).__get__(unfi)
        # filtered view: read the value cached on the unfiltered repo instead
        # of caching a per-view copy
        return getattr(unfi, self.name)
49 49
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # store on the instance itself (the filtered view), bypassing any
        # __setattr__ override
        object.__setattr__(obj, self.name, value)
55 55
56 56
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
60 60
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
66 66
# capabilities advertised by a modern local peer; legacy peers additionally
# speak the pre-getbundle 'changegroupsubset' protocol
moderncaps = set(['lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'])
legacycaps = moderncaps | set(['changegroupsubset'])
70 70
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # expose the 'served' view of the repo through this peer
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception, exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced, exc:
            # translate the internal race error into a wire-style response
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
173 173
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    # the four methods below implement the pre-getbundle wire protocol
    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 192
class localrepository(object):

    # store-format requirements this code can read and write
    supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
    # all requirements understood, including non-store ones
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # requirements forwarded to the store opener as options
    openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
    # default requirements for newly created repositories
    requirements = ['revlogv1']
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
205 205
206 206 def _baserequirements(self, create):
207 207 return self.requirements[:]
208 208
    def __init__(self, baseui, path=None, create=False):
        # working directory vfs, rooted at the (expanded, symlink-resolved)
        # repository root
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # a missing .hg/hgrc is not an error
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            # only run feature-setup functions owned by enabled extensions
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    requirements.append("manifestv2")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        # honor a .hg/sharedpath redirection if present ("shared" repos)
        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
324 324
325 325 def close(self):
326 326 self._writecaches()
327 327
328 328 def _writecaches(self):
329 329 if self._revbranchcache:
330 330 self._revbranchcache.write()
331 331
332 332 def _restrictcapabilities(self, caps):
333 333 if self.ui.configbool('experimental', 'bundle2-advertise', True):
334 334 caps = set(caps)
335 335 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
336 336 caps.add('bundle2=' + urllib.quote(capsblob))
337 337 return caps
338 338
    def _applyrequirements(self, requirements):
        """Record the requirement set and derive store opener options.

        Requirements listed in openerreqs plus a few config knobs are
        exposed to the revlog layer through self.svfs.options."""
        self.requirements = requirements
        self.svfs.options = dict((r, 1) for r in requirements
                                 if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
        if usetreemanifest is not None:
            self.svfs.options['usetreemanifest'] = usetreemanifest
355 355
356 356 def _writerequirements(self):
357 357 reqfile = self.vfs("requires", "w")
358 358 for r in sorted(self.requirements):
359 359 reqfile.write("%s\n" % r)
360 360 reqfile.close()
361 361
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        path is an absolute filesystem path; returns a boolean."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            # walk prefixes from deepest to shallowest looking for a subrepo
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    # path is inside a subrepo: let the subrepo decide
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
399 399
400 400 def peer(self):
401 401 return localpeer(self) # not cached to avoid reference cycle
402 402
403 403 def unfiltered(self):
404 404 """Return unfiltered version of the repository
405 405
406 406 Intended to be overwritten by filtered repo."""
407 407 return self
408 408
409 409 def filtered(self, name):
410 410 """Return a filtered version of a repository"""
411 411 # build a new class with the mixin and the current class
412 412 # (possibly subclass of the repo)
413 413 class proxycls(repoview.repoview, self.unfiltered().__class__):
414 414 pass
415 415 return proxycls(self, name)
416 416
417 417 @repofilecache('bookmarks')
418 418 def _bookmarks(self):
419 419 return bookmarks.bmstore(self)
420 420
421 421 @repofilecache('bookmarks.current')
422 422 def _bookmarkcurrent(self):
423 423 return bookmarks.readcurrent(self)
424 424
425 425 def bookmarkheads(self, bookmark):
426 426 name = bookmark.split('@', 1)[0]
427 427 heads = []
428 428 for mark, n in self._bookmarks.iteritems():
429 429 if mark.split('@', 1)[0] == name:
430 430 heads.append(n)
431 431 return heads
432 432
433 433 @storecache('phaseroots')
434 434 def _phasecache(self):
435 435 return phases.phasecache(self, self._phasedefaults)
436 436
    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        # the store is read-only unless marker creation is enabled
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # markers exist but the feature is off: warn rather than drop them
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store
453 453
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            # a transaction is open elsewhere (e.g. in a hook): also read the
            # pending data if it belongs to this repository
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
462 462
463 463 @storecache('00manifest.i')
464 464 def manifest(self):
465 465 return manifest.manifest(self.svfs)
466 466
    @repofilecache('dirstate')
    def dirstate(self):
        # `validate` maps a working-directory parent unknown to the changelog
        # back to nullid, warning only once per dirstate instance
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
482 482
    def __getitem__(self, changeid):
        # None means the working directory context
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # slice of revision numbers, skipping filtered revisions
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)
491 491
492 492 def __contains__(self, changeid):
493 493 try:
494 494 self[changeid]
495 495 return True
496 496 except error.RepoLookupError:
497 497 return False
498 498
499 499 def __nonzero__(self):
500 500 return True
501 501
502 502 def __len__(self):
503 503 return len(self.changelog)
504 504
505 505 def __iter__(self):
506 506 return iter(self.changelog)
507 507
508 508 def revs(self, expr, *args):
509 509 '''Return a list of revisions matching the given revset'''
510 510 expr = revset.formatspec(expr, *args)
511 511 m = revset.match(None, expr)
512 512 return m(self)
513 513
514 514 def set(self, expr, *args):
515 515 '''
516 516 Yield a context for each matching revision, after doing arg
517 517 replacement via revset.formatspec
518 518 '''
519 519 for r in self.revs(expr, *args):
520 520 yield self[r]
521 521
522 522 def url(self):
523 523 return 'file:' + self.root
524 524
525 525 def hook(self, name, throw=False, **args):
526 526 """Call a hook, passing this repo instance.
527 527
528 528 This a convenience method to aid invoking hooks. Extensions likely
529 529 won't call this unless they have registered a custom hook or are
530 530 replacing code that is expected to call a hook.
531 531 """
532 532 return hook.hook(self.ui, self, name, throw, **args)
533 533
534 534 @unfilteredmethod
535 535 def _tag(self, names, node, message, local, user, date, extra={},
536 536 editor=False):
537 537 if isinstance(names, str):
538 538 names = (names,)
539 539
540 540 branches = self.branchmap()
541 541 for name in names:
542 542 self.hook('pretag', throw=True, node=hex(node), tag=name,
543 543 local=local)
544 544 if name in branches:
545 545 self.ui.warn(_("warning: tag %s conflicts with existing"
546 546 " branch name\n") % name)
547 547
548 548 def writetags(fp, names, munge, prevtags):
549 549 fp.seek(0, 2)
550 550 if prevtags and prevtags[-1] != '\n':
551 551 fp.write('\n')
552 552 for name in names:
553 553 if munge:
554 554 m = munge(name)
555 555 else:
556 556 m = name
557 557
558 558 if (self._tagscache.tagtypes and
559 559 name in self._tagscache.tagtypes):
560 560 old = self.tags().get(name, nullid)
561 561 fp.write('%s %s\n' % (hex(old), m))
562 562 fp.write('%s %s\n' % (hex(node), m))
563 563 fp.close()
564 564
565 565 prevtags = ''
566 566 if local:
567 567 try:
568 568 fp = self.vfs('localtags', 'r+')
569 569 except IOError:
570 570 fp = self.vfs('localtags', 'a')
571 571 else:
572 572 prevtags = fp.read()
573 573
574 574 # local tags are stored in the current charset
575 575 writetags(fp, names, None, prevtags)
576 576 for name in names:
577 577 self.hook('tag', node=hex(node), tag=name, local=local)
578 578 return
579 579
580 580 try:
581 581 fp = self.wfile('.hgtags', 'rb+')
582 582 except IOError, e:
583 583 if e.errno != errno.ENOENT:
584 584 raise
585 585 fp = self.wfile('.hgtags', 'ab')
586 586 else:
587 587 prevtags = fp.read()
588 588
589 589 # committed tags are stored in UTF-8
590 590 writetags(fp, names, encoding.fromlocal, prevtags)
591 591
592 592 fp.close()
593 593
594 594 self.invalidatecaches()
595 595
596 596 if '.hgtags' not in self.dirstate:
597 597 self[None].add(['.hgtags'])
598 598
599 599 m = matchmod.exact(self.root, '', ['.hgtags'])
600 600 tagnode = self.commit(message, user, date, extra=extra, match=m,
601 601 editor=editor)
602 602
603 603 for name in names:
604 604 self.hook('tag', node=hex(node), tag=name, local=local)
605 605
606 606 return tagnode
607 607
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to create a tag commit while .hgtags has pending changes
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
637 637
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by nodetags()/tagslist()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
660 660
661 661 def tags(self):
662 662 '''return a mapping of tag to node'''
663 663 t = {}
664 664 if self.changelog.filteredrevs:
665 665 tags, tt = self._findtags()
666 666 else:
667 667 tags = self._tagscache.tags
668 668 for k, v in tags.iteritems():
669 669 try:
670 670 # ignore tags to unknown nodes
671 671 self.changelog.rev(v)
672 672 t[k] = v
673 673 except (error.LookupError, ValueError):
674 674 pass
675 675 return t
676 676
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        # 'tip' is always present and always points at the current tip
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
709 709
710 710 def tagtype(self, tagname):
711 711 '''
712 712 return the type of the given tag. result can be:
713 713
714 714 'local' : a local tag
715 715 'global' : a global tag
716 716 None : tag does not exist
717 717 '''
718 718
719 719 return self._tagscache.tagtypes.get(tagname)
720 720
721 721 def tagslist(self):
722 722 '''return a list of tags ordered by revision'''
723 723 if not self._tagscache.tagslist:
724 724 l = []
725 725 for t, n in self.tags().iteritems():
726 726 l.append((self.changelog.rev(n), t, n))
727 727 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
728 728
729 729 return self._tagscache.tagslist
730 730
731 731 def nodetags(self, node):
732 732 '''return the tags associated with a node'''
733 733 if not self._tagscache.nodetagscache:
734 734 nodetagscache = {}
735 735 for t, n in self._tagscache.tags.iteritems():
736 736 nodetagscache.setdefault(n, []).append(t)
737 737 for tags in nodetagscache.itervalues():
738 738 tags.sort()
739 739 self._tagscache.nodetagscache = nodetagscache
740 740 return self._tagscache.nodetagscache.get(node, [])
741 741
742 742 def nodebookmarks(self, node):
743 743 marks = []
744 744 for bookmark, n in self._bookmarks.iteritems():
745 745 if n == node:
746 746 marks.append(bookmark)
747 747 return sorted(marks)
748 748
749 749 def branchmap(self):
750 750 '''returns a dictionary {branch: [branchheads]} with branchheads
751 751 ordered by increasing revision number'''
752 752 branchmap.updatecache(self)
753 753 return self._branchcaches[self.filtername]
754 754
755 755 @unfilteredmethod
756 756 def revbranchcache(self):
757 757 if not self._revbranchcache:
758 758 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
759 759 return self._revbranchcache
760 760
761 761 def branchtip(self, branch, ignoremissing=False):
762 762 '''return the tip node for a given branch
763 763
764 764 If ignoremissing is True, then this method will not raise an error.
765 765 This is helpful for callers that only expect None for a missing branch
766 766 (e.g. namespace).
767 767
768 768 '''
769 769 try:
770 770 return self.branchmap().branchtip(branch)
771 771 except KeyError:
772 772 if not ignoremissing:
773 773 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
774 774 else:
775 775 pass
776 776
777 777 def lookup(self, key):
778 778 return self[key].node()
779 779
780 780 def lookupbranch(self, key, remote=None):
781 781 repo = remote or self
782 782 if key in repo.branchmap():
783 783 return key
784 784
785 785 repo = (remote and remote.local()) and remote or self
786 786 return repo[key].branch()
787 787
788 788 def known(self, nodes):
789 789 nm = self.changelog.nodemap
790 790 pc = self._phasecache
791 791 result = []
792 792 for n in nodes:
793 793 r = nm.get(n)
794 794 resp = not (r is None or pc.phase(self, r) >= phases.secret)
795 795 result.append(resp)
796 796 return result
797 797
798 798 def local(self):
799 799 return self
800 800
801 801 def cancopy(self):
802 802 # so statichttprepo's override of local() works
803 803 if not self.local():
804 804 return False
805 805 if not self.ui.configbool('phases', 'publish', True):
806 806 return True
807 807 # if publishing we can't copy if there is filtered content
808 808 return not self.filtered('visible').changelog.filteredrevs
809 809
810 810 def shared(self):
811 811 '''the type of shared repository (None if not shared)'''
812 812 if self.sharedpath != self.path:
813 813 return 'store'
814 814 return None
815 815
816 816 def join(self, f, *insidef):
817 817 return self.vfs.join(os.path.join(f, *insidef))
818 818
819 819 def wjoin(self, f, *insidef):
820 820 return self.vfs.reljoin(self.root, f, *insidef)
821 821
822 822 def file(self, f):
823 823 if f[0] == '/':
824 824 f = f[1:]
825 825 return filelog.filelog(self.svfs, f)
826 826
827 827 def changectx(self, changeid):
828 828 return self[changeid]
829 829
830 830 def parents(self, changeid=None):
831 831 '''get list of changectxs for parents of changeid'''
832 832 return self[changeid].parents()
833 833
    def setparents(self, p1, p2=nullid):
        """Set the dirstate parents to p1/p2, fixing up copy records."""
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # dropping the second parent: copy records pointing at neither
            # file in p1 are no longer meaningful
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
850 850
851 851 def filectx(self, path, changeid=None, fileid=None):
852 852 """changeid can be a changeset revision, node, or tag.
853 853 fileid can be a file revision or node."""
854 854 return context.filectx(self, path, changeid, fileid)
855 855
856 856 def getcwd(self):
857 857 return self.dirstate.getcwd()
858 858
859 859 def pathto(self, f, cwd=None):
860 860 return self.dirstate.pathto(f, cwd)
861 861
862 862 def wfile(self, f, mode='r'):
863 863 return self.wvfs(f, mode)
864 864
865 865 def _link(self, f):
866 866 return self.wvfs.islink(f)
867 867
    def _loadfilter(self, filter):
        """Load and cache the (matcher, function, params) list for the
        given filter config section ('encode'/'decode' in this file)."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # a command starting with a registered data-filter name uses
                # that in-process filter; the remainder becomes its params
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # otherwise run the command as an external shell filter
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
891 891
892 892 def _filter(self, filterpats, filename, data):
893 893 for mf, fn, cmd in filterpats:
894 894 if mf(filename):
895 895 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
896 896 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
897 897 break
898 898
899 899 return data
900 900
    @unfilteredpropertycache
    def _encodefilterpats(self):
        """Cached 'encode' filter patterns (computed on the unfiltered repo)."""
        return self._loadfilter('encode')
    @unfilteredpropertycache
    def _decodefilterpats(self):
        """Cached 'decode' filter patterns (computed on the unfiltered repo)."""
        return self._loadfilter('decode')
    def adddatafilter(self, name, filter):
        """Register a named data filter usable from encode/decode configs."""
        self._datafilters[name] = filter
912 912 def wread(self, filename):
913 913 if self._link(filename):
914 914 data = self.wvfs.readlink(filename)
915 915 else:
916 916 data = self.wvfs.read(filename)
917 917 return self._filter(self._encodefilterpats, filename, data)
918 918
919 919 def wwrite(self, filename, data, flags):
920 920 data = self._filter(self._decodefilterpats, filename, data)
921 921 if 'l' in flags:
922 922 self.wvfs.symlink(data, filename)
923 923 else:
924 924 self.wvfs.write(filename, data)
925 925 if 'x' in flags:
926 926 self.wvfs.setflags(filename, False, True)
927 927
928 928 def wwritedata(self, filename, data):
929 929 return self._filter(self._decodefilterpats, filename, data)
930 930
931 931 def currenttransaction(self):
932 932 """return the current transaction or None if non exists"""
933 933 if self._transref:
934 934 tr = self._transref()
935 935 else:
936 936 tr = None
937 937
938 938 if tr and tr.running():
939 939 return tr
940 940 return None
941 941
942 942 def transaction(self, desc, report=None):
943 943 if (self.ui.configbool('devel', 'all')
944 944 or self.ui.configbool('devel', 'check-locks')):
945 945 l = self._lockref and self._lockref()
946 946 if l is None or not l.held:
947 947 scmutil.develwarn(self.ui, 'transaction with no lock')
948 948 tr = self.currenttransaction()
949 949 if tr is not None:
950 950 return tr.nest()
951 951
952 952 # abort here if the journal already exists
953 953 if self.svfs.exists("journal"):
954 954 raise error.RepoError(
955 955 _("abandoned transaction found"),
956 956 hint=_("run 'hg recover' to clean up transaction"))
957 957
958 958 self.hook('pretxnopen', throw=True, txnname=desc)
959 959
960 960 self._writejournal(desc)
961 961 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
962 962 if report:
963 963 rp = report
964 964 else:
965 965 rp = self.ui.warn
966 966 vfsmap = {'plain': self.vfs} # root of .hg/
967 967 # we must avoid cyclic reference between repo and transaction.
968 968 reporef = weakref.ref(self)
969 969 def validate(tr):
970 970 """will run pre-closing hooks"""
971 971 pending = lambda: tr.writepending() and self.root or ""
972 972 reporef().hook('pretxnclose', throw=True, pending=pending,
973 973 xnname=desc, **tr.hookargs)
974 974
975 975 tr = transaction.transaction(rp, self.sopener, vfsmap,
976 976 "journal",
977 977 "undo",
978 978 aftertrans(renames),
979 979 self.store.createmode,
980 980 validator=validate)
981 981
982 982 trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
983 983 tr.hookargs['TXNID'] = trid
984 984 # note: writing the fncache only during finalize mean that the file is
985 985 # outdated when running hooks. As fncache is used for streaming clone,
986 986 # this is not expected to break anything that happen during the hooks.
987 987 tr.addfinalize('flush-fncache', self.store.write)
988 988 def txnclosehook(tr2):
989 989 """To be run if transaction is successful, will schedule a hook run
990 990 """
991 991 def hook():
992 992 reporef().hook('txnclose', throw=False, txnname=desc,
993 993 **tr2.hookargs)
994 994 reporef()._afterlock(hook)
995 995 tr.addfinalize('txnclose-hook', txnclosehook)
996 996 def txnaborthook(tr2):
997 997 """To be run if transaction is aborted
998 998 """
999 999 reporef().hook('txnabort', throw=False, txnname=desc,
1000 1000 **tr2.hookargs)
1001 1001 tr.addabort('txnabort-hook', txnaborthook)
1002 1002 self._transref = weakref.ref(tr)
1003 1003 return tr
1004 1004
    def _journalfiles(self):
        """Return (vfs, name) pairs for every journal file a transaction
        writes; used both for snapshotting and for deriving undo names."""
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))
1013 1013 def undofiles(self):
1014 1014 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1015 1015
    def _writejournal(self, desc):
        """Snapshot dirstate, branch, length/description, bookmarks and
        phaseroots into journal.* files so an interrupted transaction can
        be rolled back."""
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
    def recover(self):
        """Roll back an interrupted transaction, if one exists.

        Returns True if a journal was found and rolled back, False
        otherwise."""
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                # drop in-memory caches made stale by the rollback
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction recorded in the 'undo' files.

        Returns 0 on success, 1 when there is no rollback information."""
        wlock = lock = None
        try:
            # wlock before lock, per the documented lock ordering
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Implementation of rollback(): restore the pre-transaction state
        from the undo.* files.  Returns 0 on success."""
        ui = self.ui
        try:
            # undo.desc holds "<old repo length>\n<transaction desc>[\n<detail>]"
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # only restore dirstate/branch if the working directory parents
        # were stripped by the rollback
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
        parents = tuple([p.rev() for p in self.parents()])
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
        ms = mergemod.mergestate(self)
        ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
    def invalidatecaches(self):
        """Drop in-memory caches: tags, branch caches and volatile sets."""
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
    def invalidatevolatilesets(self):
        """Drop caches depending on the filtered/obsolete revision sets."""
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # drop the cached properties of the dirstate itself first
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')
    def invalidate(self):
        """Drop cached store-file properties so they are re-read on next
        access, then invalidate the derived caches."""
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname* on *vfs*.

        A first non-blocking attempt is made; if the lock is held and
        *wait* is true, retry with a timeout (ui.timeout, default 600s)
        after warning the user.  *releasefn* runs on release, *acquirefn*
        right after acquisition."""
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l
1196 1196 def _afterlock(self, callback):
1197 """add a callback to the current repository lock.
1197 """add a callback to be run when the repository is fully unlocked
1198 1198
1199 The callback will be executed on lock release."""
1200 l = self._lockref and self._lockref()
1201 if l:
1202 l.postrelease.append(callback)
1203 else:
1199 The callback will be executed when the outermost lock is released
1200 (with wlock being higher level than 'lock')."""
1201 for ref in (self._wlockref, self._lockref):
1202 l = ref and ref()
1203 if l and l.held:
1204 l.postrelease.append(callback)
1205 break
1206 else: # no lock have been found.
1204 1207 callback()
1205 1208
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # already locked in this process: re-enter the existing lock
            l.lock()
            return l

        def unlock():
            # on release, refresh the stat info of cached store files so
            # the file cache notices changes made while the lock was held
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # already locked in this process: re-enter the existing lock
            l.lock()
            return l

        # We do not need to check for non-waiting lock aquisition.  Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')

        def unlock():
            # on release, write the dirstate unless a parent change is
            # still pending (then discard it), and refresh its stat info
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the filelog node to record in the manifest: a newly added
        revision when the file changed, or the reused/first-parent node
        otherwise.  The file name is appended to *changelist* when a new
        revision (or a flags-only change) is recorded.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # the context already points at an existing filelog node; reuse
            # it when it matches one of the parents
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None when there was
        nothing to commit.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record explicitly-named directories and fail on bad files so
            # explicit patterns can be validated below
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # nothing to commit: no files changed and branch unchanged
            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Returns the node of the new changeset.  When *error* is true,
        IOErrors while committing files are always fatal (otherwise a
        plain ENOENT is re-raised without a warning message).
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1661 1664 def walk(self, match, node=None):
1662 1665 '''
1663 1666 walk recursively through the directory tree or a given
1664 1667 changeset, finding all files matched by the match
1665 1668 function
1666 1669 '''
1667 1670 return self[node].walk(match)
1668 1671
1669 1672 def status(self, node1='.', node2=None, match=None,
1670 1673 ignored=False, clean=False, unknown=False,
1671 1674 listsubrepos=False):
1672 1675 '''a convenience method that calls node1.status(node2)'''
1673 1676 return self[node1].status(node2, match, ignored, clean, unknown,
1674 1677 listsubrepos)
1675 1678
1676 1679 def heads(self, start=None):
1677 1680 heads = self.changelog.heads(start)
1678 1681 # sort the output in rev descending order
1679 1682 return sorted(heads, key=self.changelog.rev, reverse=True)
1680 1683
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
    def branches(self, nodes):
        """For each node, walk first parents back to a merge or root.

        Returns a list of (tipmost, node, p0, p1) tuples, where *node* is
        the first changeset reached from *tipmost* whose second parent is
        set or whose first parent is nullid.  Defaults to the tip when
        *nodes* is empty."""
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
def between(self, pairs):
    """For each (top, bottom) pair, sample the first-parent chain.

    Walking from *top* toward *bottom*, record the nodes found at
    exponentially growing distances (1, 2, 4, ...) and return one such
    list per pair. The walk stops at *bottom* or the null node.
    """
    result = []
    for top, bottom in pairs:
        sample = []
        node = top
        dist = 0
        nextsample = 1
        while node != bottom and node != nullid:
            parent = self.changelog.parents(node)[0]
            if dist == nextsample:
                sample.append(node)
                nextsample = nextsample * 2
            node = parent
            dist += 1
        result.append(sample)
    return result
1734 1737
def checkpush(self, pushop):
    """Hook point for extensions: validate *pushop* before pushing.

    The default implementation performs no checks. Extensions override
    this (or call it from an overridden push command) to veto a push
    before any changes are sent.
    """
    return None
1741 1744
@unfilteredpropertycache
def prepushoutgoinghooks(self):
    """Return a util.hooks container of "(repo, remote, outgoing)"
    functions, which are called before pushing changesets.

    Cached on the unfiltered repo, so the same hooks container is
    shared across repoview filters.
    """
    return util.hooks()
1748 1751
def stream_in(self, remote, requirements):
    """Perform a streaming clone: copy raw store files from *remote*.

    Reads the remote's "stream_out" wire response (status code, then a
    file count/byte total header, then raw file contents), writes the
    files straight into the local store inside a transaction, updates
    the repo requirements, and seeds the branch cache from the remote
    branchmap when available.

    Returns ``len(self.heads()) + 1``.
    Raises util.Abort on a non-zero remote status code and
    error.ResponseError on a malformed response.
    """
    lock = self.lock()
    try:
        # Save remote branchmap. We will use it later
        # to speed up branchcache creation
        rbranchmap = None
        if remote.capable("branchmap"):
            rbranchmap = remote.branchmap()

        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        # status codes: 0 ok, 1 forbidden, 2 remote lock failure
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        handled_bytes = 0
        self.ui.progress(_('clone'), 0, total=total_bytes)
        start = time.time()

        tr = self.transaction(_('clone'))
        try:
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                # each file is announced as "<name>\0<size>\n"
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.svfs(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            tr.close()
        finally:
            tr.release()

        # Writing straight to files circumvented the inmemory caches
        self.invalidate()

        elapsed = time.time() - start
        if elapsed <= 0:
            # guard against a zero/negative clock delta before dividing
            elapsed = 0.001
        self.ui.progress(_('clone'), None)
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements +
        #                    new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        if rbranchmap:
            # rebuild head/closed-head lists from the remote branchmap
            rbheads = []
            closed = []
            for bheads in rbranchmap.itervalues():
                rbheads.extend(bheads)
                for h in bheads:
                    r = self.changelog.rev(h)
                    b, c = self.changelog.branchinfo(r)
                    if c:
                        closed.append(h)

            if rbheads:
                rtiprev = max((int(self.changelog.rev(node))
                               for node in rbheads))
                cache = branchmap.branchcache(rbranchmap,
                                              self[rtiprev].node(),
                                              rtiprev,
                                              closednodes=closed)
                # Try to stick it as low as possible
                # filter above served are unlikely to be fetch from a clone
                for candidate in ('base', 'immutable', 'served'):
                    rview = self.filtered(candidate)
                    if cache.validfor(rview):
                        self._branchcaches[candidate] = cache
                        cache.write(rview)
                        break
        self.invalidate()
        return len(self.heads()) + 1
    finally:
        lock.release()
1858 1861
def clone(self, remote, heads=None, stream=None):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible

    Returns the pull result code from exchange.pull.
    '''
    # None replaces the old mutable default argument ([]); behavior is
    # unchanged since heads is only ever read, never mutated, and []
    # and None are both falsy below.
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream is None:
        # if the server explicitly prefers to stream (for fast LANs)
        stream = remote.capable('stream-preferred')

    if stream and not heads:
        # 'stream' means remote revlog format is revlogv1 only
        if remote.capable('stream'):
            self.stream_in(remote, set(('revlogv1',)))
        else:
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    self.stream_in(remote, streamreqs)

    # silence bookmark-move messages for the duration of the pull,
    # restoring the previous config afterwards
    quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
    try:
        self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
        ret = exchange.pull(self, remote, heads).cgresult
    finally:
        self.ui.restoreconfig(quiet)
    return ret
1898 1901
def pushkey(self, namespace, key, old, new):
    """Move *key* in *namespace* from value *old* to *new*.

    Runs the 'prepushkey' hook first; if it aborts, the error (and any
    hint) is reported and False is returned without pushing. Otherwise
    the pushkey backend performs the update and the 'pushkey' hook is
    scheduled via _afterlock, so it fires only after the outermost
    repository lock is released (issue4608).

    Returns the backend's result (falsy when the push failed).
    """
    try:
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
    except error.HookAbort, exc:
        self.ui.write_err(_("pushkey-abort: %s\n") % exc)
        if exc.hint:
            self.ui.write_err(_("(%s)\n") % exc.hint)
        return False
    self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
    ret = pushkey.push(self, namespace, key, old, new)
    def runhook():
        # deferred: runs unlocked, after the top-level lock is dropped
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
    self._afterlock(runhook)
    return ret
1915 1918
def listkeys(self, namespace):
    """List the keys in *namespace*.

    The 'prelistkeys' hook may veto the listing (throw=True); the
    'listkeys' hook observes the resulting values.
    """
    self.hook('prelistkeys', throw=True, namespace=namespace)
    self.ui.debug('listing keys for "%s"\n' % namespace)
    result = pushkey.list(self, namespace)
    self.hook('listkeys', namespace=namespace, values=result)
    return result
1922 1925
def debugwireargs(self, one, two, three=None, four=None, five=None):
    '''used to test argument passing over the wire'''
    # str(x) matches the old "%s"-formatting of each argument
    return ' '.join(str(arg) for arg in (one, two, three, four, five))
1926 1929
def savecommitmessage(self, text):
    """Write *text* to .hg/last-message.txt; return its repo-relative path."""
    msgfile = self.vfs('last-message.txt', 'wb')
    try:
        msgfile.write(text)
    finally:
        msgfile.close()
    # strip "<root>/" from the absolute file name before resolving
    relpath = msgfile.name[len(self.root) + 1:]
    return self.pathto(relpath)
1934 1937
1935 1938 # used to avoid circular references so destructors work
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback performing the queued (vfs, src, dest) renames.

    The rename list is snapshotted up front; a missing source (e.g. a
    journal file that was never created) is silently skipped.
    """
    pending = [tuple(entry) for entry in files]
    def renameall():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass
    return renameall
1945 1948
def undoname(fn):
    """Map a journal file path to its matching 'undo' file path.

    Only the leading 'journal' in the basename is replaced; the
    basename is asserted to start with 'journal'.
    """
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, name.replace('journal', 'undo', 1))
1950 1953
def instance(ui, path, create):
    """Repository factory: open (or create) the local repository at *path*,
    stripping any 'file:' URL scheme via util.urllocalpath."""
    return localrepository(ui, util.urllocalpath(path), create)
1953 1956
def islocal(path):
    # this module implements the local repository class, so any path it
    # handles is by definition local
    return True
@@ -1,686 +1,688 b''
1 1 commit hooks can see env vars
2 (and post-transaction one are run unlocked)
2 3
3 4 $ hg init a
4 5 $ cd a
5 6 $ cat > .hg/hgrc <<EOF
6 7 > [hooks]
7 8 > commit = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" commit"
8 9 > commit.b = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" commit.b"
9 10 > precommit = sh -c "HG_LOCAL= HG_NODE= HG_TAG= python \"$TESTDIR/printenv.py\" precommit"
10 11 > pretxncommit = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" pretxncommit"
11 12 > pretxncommit.tip = hg -q tip
12 13 > pre-identify = python "$TESTDIR/printenv.py" pre-identify 1
13 14 > pre-cat = python "$TESTDIR/printenv.py" pre-cat
14 15 > post-cat = python "$TESTDIR/printenv.py" post-cat
15 16 > pretxnopen = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" pretxnopen"
16 17 > pretxnclose = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" pretxnclose"
17 18 > txnclose = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" txnclose"
18 19 > txnabort = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" txnabort"
20 > txnclose.checklock = hg debuglock > /dev/null
19 21 > EOF
20 22 $ echo a > a
21 23 $ hg add a
22 24 $ hg commit -m a
23 25 precommit hook: HG_PARENT1=0000000000000000000000000000000000000000
24 26 pretxnopen hook: HG_TXNNAME=commit
25 27 pretxncommit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000 HG_PENDING=$TESTTMP/a
26 28 0:cb9a9f314b8b
27 29 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_XNNAME=commit (glob)
28 30 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
29 31 commit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
30 32 commit.b hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
31 33
32 34 $ hg clone . ../b
33 35 updating to branch default
34 36 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
35 37 $ cd ../b
36 38
37 39 changegroup hooks can see env vars
38 40
39 41 $ cat > .hg/hgrc <<EOF
40 42 > [hooks]
41 43 > prechangegroup = python "$TESTDIR/printenv.py" prechangegroup
42 44 > changegroup = python "$TESTDIR/printenv.py" changegroup
43 45 > incoming = python "$TESTDIR/printenv.py" incoming
44 46 > EOF
45 47
46 48 pretxncommit and commit hooks can see both parents of merge
47 49
48 50 $ cd ../a
49 51 $ echo b >> a
50 52 $ hg commit -m a1 -d "1 0"
51 53 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
52 54 pretxnopen hook: HG_TXNNAME=commit
53 55 pretxncommit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
54 56 1:ab228980c14d
55 57 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_XNNAME=commit (glob)
56 58 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
57 59 commit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
58 60 commit.b hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
59 61 $ hg update -C 0
60 62 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
61 63 $ echo b > b
62 64 $ hg add b
63 65 $ hg commit -m b -d '1 0'
64 66 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
65 67 pretxnopen hook: HG_TXNNAME=commit
66 68 pretxncommit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
67 69 2:ee9deb46ab31
68 70 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_XNNAME=commit (glob)
69 71 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
70 72 commit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
71 73 commit.b hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
72 74 created new head
73 75 $ hg merge 1
74 76 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
75 77 (branch merge, don't forget to commit)
76 78 $ hg commit -m merge -d '2 0'
77 79 precommit hook: HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
78 80 pretxnopen hook: HG_TXNNAME=commit
79 81 pretxncommit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd HG_PENDING=$TESTTMP/a
80 82 3:07f3376c1e65
81 83 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_XNNAME=commit (glob)
82 84 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
83 85 commit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
84 86 commit.b hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
85 87
86 88 test generic hooks
87 89
88 90 $ hg id
89 91 pre-identify hook: HG_ARGS=id HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None} HG_PATS=[]
90 92 abort: pre-identify hook exited with status 1
91 93 [255]
92 94 $ hg cat b
93 95 pre-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b']
94 96 b
95 97 post-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b'] HG_RESULT=0
96 98
97 99 $ cd ../b
98 100 $ hg pull ../a
99 101 pulling from ../a
100 102 searching for changes
101 103 prechangegroup hook: HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
102 104 adding changesets
103 105 adding manifests
104 106 adding file changes
105 107 added 3 changesets with 2 changes to 2 files
106 108 changegroup hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
107 109 incoming hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
108 110 incoming hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
109 111 incoming hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
110 112 (run 'hg update' to get a working copy)
111 113
112 114 tag hooks can see env vars
113 115
114 116 $ cd ../a
115 117 $ cat >> .hg/hgrc <<EOF
116 118 > pretag = python "$TESTDIR/printenv.py" pretag
117 119 > tag = sh -c "HG_PARENT1= HG_PARENT2= python \"$TESTDIR/printenv.py\" tag"
118 120 > EOF
119 121 $ hg tag -d '3 0' a
120 122 pretag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
121 123 precommit hook: HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
122 124 pretxnopen hook: HG_TXNNAME=commit
123 125 pretxncommit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PENDING=$TESTTMP/a
124 126 4:539e4b31b6dc
125 127 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_XNNAME=commit (glob)
126 128 tag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
127 129 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
128 130 commit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
129 131 commit.b hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
130 132 $ hg tag -l la
131 133 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
132 134 tag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
133 135
134 136 pretag hook can forbid tagging
135 137
136 138 $ echo "pretag.forbid = python \"$TESTDIR/printenv.py\" pretag.forbid 1" >> .hg/hgrc
137 139 $ hg tag -d '4 0' fa
138 140 pretag hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
139 141 pretag.forbid hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
140 142 abort: pretag.forbid hook exited with status 1
141 143 [255]
142 144 $ hg tag -l fla
143 145 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
144 146 pretag.forbid hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
145 147 abort: pretag.forbid hook exited with status 1
146 148 [255]
147 149
148 150 pretxncommit hook can see changeset, can roll back txn, changeset no
149 151 more there after
150 152
151 153 $ echo "pretxncommit.forbid0 = hg tip -q" >> .hg/hgrc
152 154 $ echo "pretxncommit.forbid1 = python \"$TESTDIR/printenv.py\" pretxncommit.forbid 1" >> .hg/hgrc
153 155 $ echo z > z
154 156 $ hg add z
155 157 $ hg -q tip
156 158 4:539e4b31b6dc
157 159 $ hg commit -m 'fail' -d '4 0'
158 160 precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
159 161 pretxnopen hook: HG_TXNNAME=commit
160 162 pretxncommit hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
161 163 5:6f611f8018c1
162 164 5:6f611f8018c1
163 165 pretxncommit.forbid hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
164 166 transaction abort!
165 167 txnabort hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
166 168 rollback completed
167 169 abort: pretxncommit.forbid1 hook exited with status 1
168 170 [255]
169 171 $ hg -q tip
170 172 4:539e4b31b6dc
171 173
172 174 (Check that no 'changelog.i.a' file were left behind)
173 175
174 176 $ ls -1 .hg/store/
175 177 00changelog.i
176 178 00manifest.i
177 179 data
178 180 fncache
179 181 journal.phaseroots
180 182 phaseroots
181 183 undo
182 184 undo.backup.fncache
183 185 undo.backupfiles
184 186 undo.phaseroots
185 187
186 188
187 189 precommit hook can prevent commit
188 190
189 191 $ echo "precommit.forbid = python \"$TESTDIR/printenv.py\" precommit.forbid 1" >> .hg/hgrc
190 192 $ hg commit -m 'fail' -d '4 0'
191 193 precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
192 194 precommit.forbid hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
193 195 abort: precommit.forbid hook exited with status 1
194 196 [255]
195 197 $ hg -q tip
196 198 4:539e4b31b6dc
197 199
198 200 preupdate hook can prevent update
199 201
200 202 $ echo "preupdate = python \"$TESTDIR/printenv.py\" preupdate" >> .hg/hgrc
201 203 $ hg update 1
202 204 preupdate hook: HG_PARENT1=ab228980c14d
203 205 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
204 206
205 207 update hook
206 208
207 209 $ echo "update = python \"$TESTDIR/printenv.py\" update" >> .hg/hgrc
208 210 $ hg update
209 211 preupdate hook: HG_PARENT1=539e4b31b6dc
210 212 update hook: HG_ERROR=0 HG_PARENT1=539e4b31b6dc
211 213 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
212 214
213 215 pushkey hook
214 216
215 217 $ echo "pushkey = python \"$TESTDIR/printenv.py\" pushkey" >> .hg/hgrc
216 218 $ cd ../b
217 219 $ hg bookmark -r null foo
218 220 $ hg push -B foo ../a
219 221 pushing to ../a
220 222 searching for changes
221 223 no changes found
222 224 pretxnopen hook: HG_TXNNAME=bookmarks
223 225 pretxnclose hook: HG_BOOKMARK_MOVED=1 HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_XNNAME=bookmarks (glob)
224 226 txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmarks (glob)
225 227 pushkey hook: HG_KEY=foo HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000 HG_RET=1
226 228 exporting bookmark foo
227 229 [1]
228 230 $ cd ../a
229 231
230 232 listkeys hook
231 233
232 234 $ echo "listkeys = python \"$TESTDIR/printenv.py\" listkeys" >> .hg/hgrc
233 235 $ hg bookmark -r null bar
234 236 $ cd ../b
235 237 $ hg pull -B bar ../a
236 238 pulling from ../a
237 239 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
238 240 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
239 241 no changes found
240 242 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
241 243 adding remote bookmark bar
242 244 $ cd ../a
243 245
244 246 test that prepushkey can prevent incoming keys
245 247
246 248 $ echo "prepushkey = python \"$TESTDIR/printenv.py\" prepushkey.forbid 1" >> .hg/hgrc
247 249 $ cd ../b
248 250 $ hg bookmark -r null baz
249 251 $ hg push -B baz ../a
250 252 pushing to ../a
251 253 searching for changes
252 254 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
253 255 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
254 256 no changes found
255 257 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
256 258 prepushkey.forbid hook: HG_KEY=baz HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000
257 259 pushkey-abort: prepushkey hook exited with status 1
258 260 exporting bookmark baz failed!
259 261 [1]
260 262 $ cd ../a
261 263
262 264 test that prelistkeys can prevent listing keys
263 265
264 266 $ echo "prelistkeys = python \"$TESTDIR/printenv.py\" prelistkeys.forbid 1" >> .hg/hgrc
265 267 $ hg bookmark -r null quux
266 268 $ cd ../b
267 269 $ hg pull -B quux ../a
268 270 pulling from ../a
269 271 prelistkeys.forbid hook: HG_NAMESPACE=bookmarks
270 272 abort: prelistkeys hook exited with status 1
271 273 [255]
272 274 $ cd ../a
273 275 $ rm .hg/hgrc
274 276
275 277 prechangegroup hook can prevent incoming changes
276 278
277 279 $ cd ../b
278 280 $ hg -q tip
279 281 3:07f3376c1e65
280 282 $ cat > .hg/hgrc <<EOF
281 283 > [hooks]
282 284 > prechangegroup.forbid = python "$TESTDIR/printenv.py" prechangegroup.forbid 1
283 285 > EOF
284 286 $ hg pull ../a
285 287 pulling from ../a
286 288 searching for changes
287 289 prechangegroup.forbid hook: HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
288 290 abort: prechangegroup.forbid hook exited with status 1
289 291 [255]
290 292
291 293 pretxnchangegroup hook can see incoming changes, can roll back txn,
292 294 incoming changes no longer there after
293 295
294 296 $ cat > .hg/hgrc <<EOF
295 297 > [hooks]
296 298 > pretxnchangegroup.forbid0 = hg tip -q
297 299 > pretxnchangegroup.forbid1 = python "$TESTDIR/printenv.py" pretxnchangegroup.forbid 1
298 300 > EOF
299 301 $ hg pull ../a
300 302 pulling from ../a
301 303 searching for changes
302 304 adding changesets
303 305 adding manifests
304 306 adding file changes
305 307 added 1 changesets with 1 changes to 1 files
306 308 4:539e4b31b6dc
307 309 pretxnchangegroup.forbid hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/b HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
308 310 transaction abort!
309 311 rollback completed
310 312 abort: pretxnchangegroup.forbid1 hook exited with status 1
311 313 [255]
312 314 $ hg -q tip
313 315 3:07f3376c1e65
314 316
315 317 outgoing hooks can see env vars
316 318
317 319 $ rm .hg/hgrc
318 320 $ cat > ../a/.hg/hgrc <<EOF
319 321 > [hooks]
320 322 > preoutgoing = python "$TESTDIR/printenv.py" preoutgoing
321 323 > outgoing = python "$TESTDIR/printenv.py" outgoing
322 324 > EOF
323 325 $ hg pull ../a
324 326 pulling from ../a
325 327 searching for changes
326 328 preoutgoing hook: HG_SOURCE=pull
327 329 adding changesets
328 330 outgoing hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_SOURCE=pull
329 331 adding manifests
330 332 adding file changes
331 333 added 1 changesets with 1 changes to 1 files
332 334 adding remote bookmark quux
333 335 (run 'hg update' to get a working copy)
334 336 $ hg rollback
335 337 repository tip rolled back to revision 3 (undo pull)
336 338
337 339 preoutgoing hook can prevent outgoing changes
338 340
339 341 $ echo "preoutgoing.forbid = python \"$TESTDIR/printenv.py\" preoutgoing.forbid 1" >> ../a/.hg/hgrc
340 342 $ hg pull ../a
341 343 pulling from ../a
342 344 searching for changes
343 345 preoutgoing hook: HG_SOURCE=pull
344 346 preoutgoing.forbid hook: HG_SOURCE=pull
345 347 abort: preoutgoing.forbid hook exited with status 1
346 348 [255]
347 349
348 350 outgoing hooks work for local clones
349 351
350 352 $ cd ..
351 353 $ cat > a/.hg/hgrc <<EOF
352 354 > [hooks]
353 355 > preoutgoing = python "$TESTDIR/printenv.py" preoutgoing
354 356 > outgoing = python "$TESTDIR/printenv.py" outgoing
355 357 > EOF
356 358 $ hg clone a c
357 359 preoutgoing hook: HG_SOURCE=clone
358 360 outgoing hook: HG_NODE=0000000000000000000000000000000000000000 HG_SOURCE=clone
359 361 updating to branch default
360 362 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
361 363 $ rm -rf c
362 364
363 365 preoutgoing hook can prevent outgoing changes for local clones
364 366
365 367 $ echo "preoutgoing.forbid = python \"$TESTDIR/printenv.py\" preoutgoing.forbid 1" >> a/.hg/hgrc
366 368 $ hg clone a zzz
367 369 preoutgoing hook: HG_SOURCE=clone
368 370 preoutgoing.forbid hook: HG_SOURCE=clone
369 371 abort: preoutgoing.forbid hook exited with status 1
370 372 [255]
371 373
372 374 $ cd "$TESTTMP/b"
373 375
374 376 $ cat > hooktests.py <<EOF
375 377 > from mercurial import util
376 378 >
377 379 > uncallable = 0
378 380 >
379 381 > def printargs(args):
380 382 > args.pop('ui', None)
381 383 > args.pop('repo', None)
382 384 > a = list(args.items())
383 385 > a.sort()
384 386 > print 'hook args:'
385 387 > for k, v in a:
386 388 > print ' ', k, v
387 389 >
388 390 > def passhook(**args):
389 391 > printargs(args)
390 392 >
391 393 > def failhook(**args):
392 394 > printargs(args)
393 395 > return True
394 396 >
395 397 > class LocalException(Exception):
396 398 > pass
397 399 >
398 400 > def raisehook(**args):
399 401 > raise LocalException('exception from hook')
400 402 >
401 403 > def aborthook(**args):
402 404 > raise util.Abort('raise abort from hook')
403 405 >
404 406 > def brokenhook(**args):
405 407 > return 1 + {}
406 408 >
407 409 > def verbosehook(ui, **args):
408 410 > ui.note('verbose output from hook\n')
409 411 >
410 412 > def printtags(ui, repo, **args):
411 413 > print sorted(repo.tags())
412 414 >
413 415 > class container:
414 416 > unreachable = 1
415 417 > EOF
416 418
417 419 test python hooks
418 420
419 421 #if windows
420 422 $ PYTHONPATH="$TESTTMP/b;$PYTHONPATH"
421 423 #else
422 424 $ PYTHONPATH="$TESTTMP/b:$PYTHONPATH"
423 425 #endif
424 426 $ export PYTHONPATH
425 427
426 428 $ echo '[hooks]' > ../a/.hg/hgrc
427 429 $ echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
428 430 $ hg pull ../a 2>&1 | grep 'raised an exception'
429 431 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
430 432
431 433 $ echo '[hooks]' > ../a/.hg/hgrc
432 434 $ echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
433 435 $ hg pull ../a 2>&1 | grep 'raised an exception'
434 436 error: preoutgoing.raise hook raised an exception: exception from hook
435 437
436 438 $ echo '[hooks]' > ../a/.hg/hgrc
437 439 $ echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
438 440 $ hg pull ../a
439 441 pulling from ../a
440 442 searching for changes
441 443 error: preoutgoing.abort hook failed: raise abort from hook
442 444 abort: raise abort from hook
443 445 [255]
444 446
445 447 $ echo '[hooks]' > ../a/.hg/hgrc
446 448 $ echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
447 449 $ hg pull ../a
448 450 pulling from ../a
449 451 searching for changes
450 452 hook args:
451 453 hooktype preoutgoing
452 454 source pull
453 455 abort: preoutgoing.fail hook failed
454 456 [255]
455 457
456 458 $ echo '[hooks]' > ../a/.hg/hgrc
457 459 $ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
458 460 $ hg pull ../a
459 461 pulling from ../a
460 462 searching for changes
461 463 abort: preoutgoing.uncallable hook is invalid ("hooktests.uncallable" is not callable)
462 464 [255]
463 465
464 466 $ echo '[hooks]' > ../a/.hg/hgrc
465 467 $ echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
466 468 $ hg pull ../a
467 469 pulling from ../a
468 470 searching for changes
469 471 abort: preoutgoing.nohook hook is invalid ("hooktests.nohook" is not defined)
470 472 [255]
471 473
472 474 $ echo '[hooks]' > ../a/.hg/hgrc
473 475 $ echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
474 476 $ hg pull ../a
475 477 pulling from ../a
476 478 searching for changes
477 479 abort: preoutgoing.nomodule hook is invalid ("nomodule" not in a module)
478 480 [255]
479 481
480 482 $ echo '[hooks]' > ../a/.hg/hgrc
481 483 $ echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
482 484 $ hg pull ../a
483 485 pulling from ../a
484 486 searching for changes
485 487 abort: preoutgoing.badmodule hook is invalid (import of "nomodule" failed)
486 488 [255]
487 489
488 490 $ echo '[hooks]' > ../a/.hg/hgrc
489 491 $ echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
490 492 $ hg pull ../a
491 493 pulling from ../a
492 494 searching for changes
493 495 abort: preoutgoing.unreachable hook is invalid (import of "hooktests.container" failed)
494 496 [255]
495 497
496 498 $ echo '[hooks]' > ../a/.hg/hgrc
497 499 $ echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
498 500 $ hg pull ../a
499 501 pulling from ../a
500 502 searching for changes
501 503 hook args:
502 504 hooktype preoutgoing
503 505 source pull
504 506 adding changesets
505 507 adding manifests
506 508 adding file changes
507 509 added 1 changesets with 1 changes to 1 files
508 510 adding remote bookmark quux
509 511 (run 'hg update' to get a working copy)
510 512
511 513 make sure --traceback works
512 514
513 515 $ echo '[hooks]' > .hg/hgrc
514 516 $ echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc
515 517
516 518 $ echo aa > a
517 519 $ hg --traceback commit -d '0 0' -ma 2>&1 | grep '^Traceback'
518 520 Traceback (most recent call last):
519 521
520 522 $ cd ..
521 523 $ hg init c
522 524 $ cd c
523 525
524 526 $ cat > hookext.py <<EOF
525 527 > def autohook(**args):
526 528 > print "Automatically installed hook"
527 529 >
528 530 > def reposetup(ui, repo):
529 531 > repo.ui.setconfig("hooks", "commit.auto", autohook)
530 532 > EOF
531 533 $ echo '[extensions]' >> .hg/hgrc
532 534 $ echo 'hookext = hookext.py' >> .hg/hgrc
533 535
534 536 $ touch foo
535 537 $ hg add foo
536 538 $ hg ci -d '0 0' -m 'add foo'
537 539 Automatically installed hook
538 540 $ echo >> foo
539 541 $ hg ci --debug -d '0 0' -m 'change foo'
540 542 committing files:
541 543 foo
542 544 committing manifest
543 545 committing changelog
544 546 calling hook commit.auto: hgext_hookext.autohook
545 547 Automatically installed hook
546 548 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
547 549
548 550 $ hg showconfig hooks
549 551 hooks.commit.auto=<function autohook at *> (glob)
550 552
551 553 test python hook configured with python:[file]:[hook] syntax
552 554
553 555 $ cd ..
554 556 $ mkdir d
555 557 $ cd d
556 558 $ hg init repo
557 559 $ mkdir hooks
558 560
559 561 $ cd hooks
560 562 $ cat > testhooks.py <<EOF
561 563 > def testhook(**args):
562 564 > print 'hook works'
563 565 > EOF
564 566 $ echo '[hooks]' > ../repo/.hg/hgrc
565 567 $ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
566 568
567 569 $ cd ../repo
568 570 $ hg commit -d '0 0'
569 571 hook works
570 572 nothing changed
571 573 [1]
572 574
573 575 $ echo '[hooks]' > .hg/hgrc
574 576 $ echo "update.ne = python:`pwd`/nonexistent.py:testhook" >> .hg/hgrc
575 577 $ echo "pre-identify.npmd = python:`pwd`/:no_python_module_dir" >> .hg/hgrc
576 578
577 579 $ hg up null
578 580 loading update.ne hook failed:
579 581 abort: No such file or directory: $TESTTMP/d/repo/nonexistent.py
580 582 [255]
581 583
582 584 $ hg id
583 585 loading pre-identify.npmd hook failed:
584 586 abort: No module named repo!
585 587 [255]
586 588
587 589 $ cd ../../b
588 590
589 591 make sure --traceback works on hook import failure
590 592
591 593 $ cat > importfail.py <<EOF
592 594 > import somebogusmodule
593 595 > # dereference something in the module to force demandimport to load it
594 596 > somebogusmodule.whatever
595 597 > EOF
596 598
597 599 $ echo '[hooks]' > .hg/hgrc
598 600 $ echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc
599 601
600 602 $ echo a >> a
601 603 $ hg --traceback commit -ma 2>&1 | egrep -v '^( +File| [a-zA-Z(])'
602 604 exception from first failed import attempt:
603 605 Traceback (most recent call last):
604 606 ImportError: No module named somebogusmodule
605 607 exception from second failed import attempt:
606 608 Traceback (most recent call last):
607 609 ImportError: No module named hgext_importfail
608 610 Traceback (most recent call last):
609 611 Abort: precommit.importfail hook is invalid (import of "importfail" failed)
610 612 abort: precommit.importfail hook is invalid (import of "importfail" failed)
611 613
612 614 Issue1827: Hooks Update & Commit not completely post operation
613 615
614 616 commit and update hooks should run after command completion
615 617
616 618 $ echo '[hooks]' > .hg/hgrc
617 619 $ echo 'commit = hg id' >> .hg/hgrc
618 620 $ echo 'update = hg id' >> .hg/hgrc
619 621 $ echo bb > a
620 622 $ hg ci -ma
621 623 223eafe2750c tip
622 624 $ hg up 0
623 625 cb9a9f314b8b
624 626 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
625 627
626 628 make sure --verbose (and --quiet/--debug etc.) are propagated to the local ui
627 629 that is passed to pre/post hooks
628 630
629 631 $ echo '[hooks]' > .hg/hgrc
630 632 $ echo 'pre-identify = python:hooktests.verbosehook' >> .hg/hgrc
631 633 $ hg id
632 634 cb9a9f314b8b
633 635 $ hg id --verbose
634 636 calling hook pre-identify: hooktests.verbosehook
635 637 verbose output from hook
636 638 cb9a9f314b8b
637 639
638 640 Ensure hooks can be prioritized
639 641
640 642 $ echo '[hooks]' > .hg/hgrc
641 643 $ echo 'pre-identify.a = python:hooktests.verbosehook' >> .hg/hgrc
642 644 $ echo 'pre-identify.b = python:hooktests.verbosehook' >> .hg/hgrc
643 645 $ echo 'priority.pre-identify.b = 1' >> .hg/hgrc
644 646 $ echo 'pre-identify.c = python:hooktests.verbosehook' >> .hg/hgrc
645 647 $ hg id --verbose
646 648 calling hook pre-identify.b: hooktests.verbosehook
647 649 verbose output from hook
648 650 calling hook pre-identify.a: hooktests.verbosehook
649 651 verbose output from hook
650 652 calling hook pre-identify.c: hooktests.verbosehook
651 653 verbose output from hook
652 654 cb9a9f314b8b
653 655
654 656 new tags must be visible in pretxncommit (issue3210)
655 657
656 658 $ echo 'pretxncommit.printtags = python:hooktests.printtags' >> .hg/hgrc
657 659 $ hg tag -f foo
658 660 ['a', 'foo', 'tip']
659 661
660 662 new commits must be visible in pretxnchangegroup (issue3428)
661 663
662 664 $ cd ..
663 665 $ hg init to
664 666 $ echo '[hooks]' >> to/.hg/hgrc
665 667 $ echo 'pretxnchangegroup = hg --traceback tip' >> to/.hg/hgrc
666 668 $ echo a >> to/a
667 669 $ hg --cwd to ci -Ama
668 670 adding a
669 671 $ hg clone to from
670 672 updating to branch default
671 673 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
672 674 $ echo aa >> from/a
673 675 $ hg --cwd from ci -mb
674 676 $ hg --cwd from push
675 677 pushing to $TESTTMP/to (glob)
676 678 searching for changes
677 679 adding changesets
678 680 adding manifests
679 681 adding file changes
680 682 added 1 changesets with 1 changes to 1 files
681 683 changeset: 1:9836a07b9b9d
682 684 tag: tip
683 685 user: test
684 686 date: Thu Jan 01 00:00:00 1970 +0000
685 687 summary: b
686 688
General Comments 0
You need to be logged in to leave comments. Login now