##// END OF EJS Templates
tag: run commit hook when lock is released (issue3344)
Mads Kiilerich -
r16680:d0e419b0 stable
parent child Browse files
Show More
@@ -1,2349 +1,2353
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # store files live under .hg/store, so resolve paths with
        # sjoin rather than the plain .hg join of the base class
        return obj.sjoin(fname)
26 26
27 27 class localrepository(repo.repository):
28 28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
29 29 'known', 'getbundle'))
30 30 supportedformats = set(('revlogv1', 'generaldelta'))
31 31 supported = supportedformats | set(('store', 'fncache', 'shared',
32 32 'dotencode'))
33 33
    def __init__(self, baseui, path=None, create=False):
        # Open (or, when create is True, create) the repository rooted
        # at 'path', setting up paths, openers and format requirements.
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        self._dirtyphases = False
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks have the form: func(repo, roots) --> processed roots.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no .hg/hgrc: run with the base configuration only
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                # choose format requirements from the [format] config
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                # a missing requires file means an old, requirement-less repo
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        # honor .hg/sharedpath if present (shared store support)
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}
122 122
    def _applyrequirements(self, requirements):
        # Record the repo requirements and expose the revlog-relevant
        # ones to the store opener as revlog open options.
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        # Persist self.requirements to .hg/requires, one per line.
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()
134 134
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            # walk prefixes from longest to shortest, looking for the
            # closest enclosing subrepo in the working context
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    # delegate the remainder of the path to the subrepo
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
172 172
    @filecache('bookmarks')
    def _bookmarks(self):
        # bookmark name -> node map, reloaded when .hg/bookmarks changes
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # the currently active bookmark (as read by bookmarks.readcurrent)
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        # NOTE(review): 'marks' is unused here; bookmarks.write reads
        # the repository's bookmark state directly
        bookmarks.write(self)

    @storecache('phaseroots')
    def _phaseroots(self):
        # phase roots as read from the store, with roots pointing at
        # unknown nodes filtered out; reading resets the dirty flag
        self._dirtyphases = False
        phaseroots = phases.readroots(self)
        phases.filterunknown(self, phaseroots)
        return phaseroots
190 190
    @propertycache
    def _phaserev(self):
        # Build a rev -> phase lookup table: every revision defaults to
        # public, then the roots of each tracked phase and all their
        # descendants are marked with that phase.
        cache = [phases.public] * len(self)
        for phase in phases.trackedphases:
            roots = map(self.changelog.rev, self._phaseroots[phase])
            if roots:
                for rev in roots:
                    cache[rev] = phase
                for rev in self.changelog.descendants(*roots):
                    cache[rev] = phase
        return cache
202 202
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            # a transaction is pending in this repository: also read the
            # not-yet-committed revisions from the pending file
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        # the manifest revlog, reloaded when 00manifest.i changes
        return manifest.manifest(self.sopener)
215 215
    @filecache('dirstate')
    def dirstate(self):
        # one-element list so the nested function can mutate it
        # (no 'nonlocal' in this Python version)
        warned = [0]
        def validate(node):
            # map unknown working-directory parents to nullid, warning
            # only once per dirstate instance
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
231 231
    def __getitem__(self, changeid):
        # repo[None] is the working directory context
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        # True if changeid resolves to a node in this repository
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    def __len__(self):
        # number of revisions in the changelog
        return len(self.changelog)

    def __iter__(self):
        # iterate over revision numbers, 0..len(self)-1
        for i in xrange(len(self)):
            yield i
252 252
253 253 def revs(self, expr, *args):
254 254 '''Return a list of revisions matching the given revset'''
255 255 expr = revset.formatspec(expr, *args)
256 256 m = revset.match(None, expr)
257 257 return [r for r in m(self, range(len(self)))]
258 258
259 259 def set(self, expr, *args):
260 260 '''
261 261 Yield a context for each matching revision, after doing arg
262 262 replacement via revset.formatspec
263 263 '''
264 264 for r in self.revs(expr, *args):
265 265 yield self[r]
266 266
267 267 def url(self):
268 268 return 'file:' + self.root
269 269
270 270 def hook(self, name, throw=False, **args):
271 271 return hook.hook(self.ui, self, name, throw, **args)
272 272
273 273 tag_disallowed = ':\r\n'
274 274
275 275 def _tag(self, names, node, message, local, user, date, extra={}):
276 276 if isinstance(names, str):
277 277 allchars = names
278 278 names = (names,)
279 279 else:
280 280 allchars = ''.join(names)
281 281 for c in self.tag_disallowed:
282 282 if c in allchars:
283 283 raise util.Abort(_('%r cannot be used in a tag name') % c)
284 284
285 285 branches = self.branchmap()
286 286 for name in names:
287 287 self.hook('pretag', throw=True, node=hex(node), tag=name,
288 288 local=local)
289 289 if name in branches:
290 290 self.ui.warn(_("warning: tag %s conflicts with existing"
291 291 " branch name\n") % name)
292 292
293 293 def writetags(fp, names, munge, prevtags):
294 294 fp.seek(0, 2)
295 295 if prevtags and prevtags[-1] != '\n':
296 296 fp.write('\n')
297 297 for name in names:
298 298 m = munge and munge(name) or name
299 299 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
300 300 old = self.tags().get(name, nullid)
301 301 fp.write('%s %s\n' % (hex(old), m))
302 302 fp.write('%s %s\n' % (hex(node), m))
303 303 fp.close()
304 304
305 305 prevtags = ''
306 306 if local:
307 307 try:
308 308 fp = self.opener('localtags', 'r+')
309 309 except IOError:
310 310 fp = self.opener('localtags', 'a')
311 311 else:
312 312 prevtags = fp.read()
313 313
314 314 # local tags are stored in the current charset
315 315 writetags(fp, names, None, prevtags)
316 316 for name in names:
317 317 self.hook('tag', node=hex(node), tag=name, local=local)
318 318 return
319 319
320 320 try:
321 321 fp = self.wfile('.hgtags', 'rb+')
322 322 except IOError, e:
323 323 if e.errno != errno.ENOENT:
324 324 raise
325 325 fp = self.wfile('.hgtags', 'ab')
326 326 else:
327 327 prevtags = fp.read()
328 328
329 329 # committed tags are stored in UTF-8
330 330 writetags(fp, names, encoding.fromlocal, prevtags)
331 331
332 332 fp.close()
333 333
334 334 self.invalidatecaches()
335 335
336 336 if '.hgtags' not in self.dirstate:
337 337 self[None].add(['.hgtags'])
338 338
339 339 m = matchmod.exact(self.root, '', ['.hgtags'])
340 340 tagnode = self.commit(message, user, date, extra=extra, match=m)
341 341
342 342 for name in names:
343 343 self.hook('tag', node=hex(node), tag=name, local=local)
344 344
345 345 return tagnode
346 346
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to tag if .hgtags shows up in any of the first five
            # status categories (i.e. it has uncommitted changes)
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
376 376
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # computed lazily by tagslist() and nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
398 398
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                # (LookupError: node not in changelog; ValueError:
                # presumably a malformed node id — TODO confirm)
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
410 410
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        # the 'tip' tag always exists and points at the changelog tip
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
443 443
444 444 def tagtype(self, tagname):
445 445 '''
446 446 return the type of the given tag. result can be:
447 447
448 448 'local' : a local tag
449 449 'global' : a global tag
450 450 None : tag does not exist
451 451 '''
452 452
453 453 return self._tagscache.tagtypes.get(tagname)
454 454
455 455 def tagslist(self):
456 456 '''return a list of tags ordered by revision'''
457 457 if not self._tagscache.tagslist:
458 458 l = []
459 459 for t, n in self.tags().iteritems():
460 460 r = self.changelog.rev(n)
461 461 l.append((r, t, n))
462 462 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
463 463
464 464 return self._tagscache.tagslist
465 465
466 466 def nodetags(self, node):
467 467 '''return the tags associated with a node'''
468 468 if not self._tagscache.nodetagscache:
469 469 nodetagscache = {}
470 470 for t, n in self._tagscache.tags.iteritems():
471 471 nodetagscache.setdefault(n, []).append(t)
472 472 for tags in nodetagscache.itervalues():
473 473 tags.sort()
474 474 self._tagscache.nodetagscache = nodetagscache
475 475 return self._tagscache.nodetagscache.get(node, [])
476 476
477 477 def nodebookmarks(self, node):
478 478 marks = []
479 479 for bookmark, n in self._bookmarks.iteritems():
480 480 if n == node:
481 481 marks.append(bookmark)
482 482 return sorted(marks)
483 483
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # Extend the partial branch cache with revisions lrev+1..tip and
        # write the updated cache back to disk.
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        # Bring self._branchcache up to date with the current changelog tip.
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # cache already current
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory state (e.g. the old tip vanished):
            # start from the on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            # incrementally update from the in-memory cache
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
510 510
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            # default to the last head; prefer the newest head whose
            # changeset extra does not mark the branch closed
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt
528 528
    def _readbranchcache(self):
        """Read the on-disk branch head cache.

        Returns (partial, last, lrev): partial maps branch name to a
        list of head nodes; last/lrev are the tip node/rev the cache was
        valid for.  Any problem yields an empty, invalid cache.
        """
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<node hex> <branch name>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # the cache is only an optimization: report in debug mode
            # and fall back to an empty cache
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
557 557
    def _writebranchcache(self, branches, tip, tiprev):
        # Best-effort write of the branch head cache; I/O failures are
        # deliberately ignored since the cache can be rebuilt.
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass
568 568
    def _updatebranchcache(self, partial, ctxgen):
        """Fold the changectxs produced by ctxgen into the partial
        branch head cache, removing old heads that became reachable
        from the new ones."""
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    # anything reachable from 'latest' is not a head
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
594 594
595 595 def lookup(self, key):
596 596 return self[key].node()
597 597
598 598 def lookupbranch(self, key, remote=None):
599 599 repo = remote or self
600 600 if key in repo.branchmap():
601 601 return key
602 602
603 603 repo = (remote and remote.local()) and remote or self
604 604 return repo[key].branch()
605 605
606 606 def known(self, nodes):
607 607 nm = self.changelog.nodemap
608 608 result = []
609 609 for n in nodes:
610 610 r = nm.get(n)
611 611 resp = not (r is None or self._phaserev[r] >= phases.secret)
612 612 result.append(resp)
613 613 return result
614 614
615 615 def local(self):
616 616 return self
617 617
    def join(self, f):
        """Join f with the .hg directory path."""
        return os.path.join(self.path, f)

    def wjoin(self, f):
        """Join f with the working directory root."""
        return os.path.join(self.root, f)

    def file(self, f):
        """Return the filelog for tracked file f."""
        if f[0] == '/':
            # accept an absolute in-repo path by stripping the slash
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        """Synonym for self[changeid]."""
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        """Set the working directory parents to p1/p2, preserving
        applicable copy records."""
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        # current directory as seen by the dirstate
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        # repo-relative path f rendered relative to cwd
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        """Open file f relative to the working directory."""
        return self.wopener(f, mode)
660 660
    def _link(self, f):
        """True if working-directory file f is a symlink."""
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        """Build (and cache in self.filterpats) the list of
        (matcher, filterfn, params) triples for the named config
        section ('encode' or 'decode')."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # registered data filter: the rest of the command
                        # string becomes its parameters
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # no registered filter: fall back to util.filter with
                    # the raw command string
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
687 687
    def _filter(self, filterpats, filename, data):
        """Run data through the first filter whose pattern matches
        filename; return the (possibly unmodified) data."""
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        # filters applied when reading from the working directory (wread)
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        # filters applied when writing to the working directory (wwrite)
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        """Register a named data filter usable in encode/decode rules."""
        self._datafilters[name] = filter
707 707
    def wread(self, filename):
        """Read filename from the working directory, applying encode
        filters.  Symlinks are read as their target path."""
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory, applying
        decode filters and honoring the 'l' (symlink) and 'x' (exec)
        flags."""
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        """Return data as wwrite would write it (decode filters applied)
        without touching the filesystem."""
        return self._filter(self._decodefilterpats, filename, data)
726 726
    def transaction(self, desc):
        """Open a store transaction described by desc, nesting into any
        transaction that is already running."""
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        # after a successful transaction, journal files are renamed to
        # their undo names
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        # every file participating in the transaction journal
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        """Return the undo counterparts of the journal files."""
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        # snapshot pre-transaction state so it can be restored on abort
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                          self.sopener.tryread("phaseroots"))
767 767
    def recover(self):
        """Roll back an interrupted transaction, if any.

        Returns True when a transaction was recovered, False otherwise.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction when undo information exists.

        Returns the status of _rollback (0 on success) or 1 when there
        is nothing to roll back.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
795 795
    def _rollback(self, dryrun, force):
        """Perform the actual rollback of the last transaction.

        Returns 0 on success.  With dryrun, only report what would be
        done.  Without force, refuse to roll back a commit while the
        working directory is not at tip (possible data loss).
        """
        ui = self.ui
        try:
            # undo.desc: "<old changelog length>\n<desc>[\n<detail>]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # no undo.desc: we cannot tell what is being undone
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # only restore the saved dirstate if the rolled-back working
        # directory parents no longer exist in the changelog
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        self.destroyed()
        return 0
858 858
    def invalidatecaches(self):
        """Drop the in-memory caches derived from the changelog."""
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                # cache was never populated
                pass

        delcache('_tagscache')
        delcache('_phaserev')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if 'dirstate' in self.__dict__:
            # drop the dirstate's own filecache entries, then the cached
            # dirstate property itself
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        """Drop all filecache-backed properties (except the dirstate,
        which has its own invalidation path) plus the derived caches."""
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()
900 900
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the named lock, optionally waiting for it, then call
        acquirefn and return the lock object."""
        try:
            # first attempt is non-blocking (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            # no lock is currently held: run the callback right away
            # (issue3344: e.g. hooks scheduled while unlocked)
            callback()
923 925
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # reuse an already-held lock (reentrant): bump its count
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # flush pending store and phase data before the lock goes away
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
                self._dirtyphases = False
            # refresh filecache stats so cached properties aren't
            # spuriously invalidated by our own writes
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        # self.invalidate as acquirefn: drop caches that may be stale
        # relative to what another process wrote while we were unlocked
        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
947 949
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # reuse an already-held wlock (reentrant): bump its count
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # flush the dirstate and refresh its filecache entry so our
            # own write doesn't look like an external modification
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
968 970
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: the file context being committed
        manifest1/manifest2: manifests of the two parents (manifest2 is
        empty/falsy outside a merge)
        linkrev: changelog revision the new filelog entry links to
        tr: the active transaction
        changelist: list to which the filename is appended if the file
        actually changed

        Returns the new filenode, or the first-parent filenode when only
        the flags changed or nothing changed.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1048 1050
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing
        to commit. Raises util.Abort on invalid commits (partial merge,
        missing files, unresolved conflicts, subrepo problems, ...).
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit patterns naming a
            # directory can be validated below
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing changed and not forced/closing/merging on the same
            # branch: there is nothing to commit
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # bare except on purpose: point at the saved message for
                # any failure, then re-raise unchanged
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        # run the commit hook only once the store lock is released
        # (issue3344); default arguments freeze the values now, so the
        # hook sees this commit even if ret/hookp* are rebound later
        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
1214 1218
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When ``error`` is true, IOErrors while committing individual files
        are fatal; otherwise a missing file (ENOENT) is treated as removed.
        Returns the new changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # weak proxy so filelog/manifest code cannot keep the
            # transaction alive past its release
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # 'pending' callback lets hooks see the not-yet-finalized
            # changelog entry
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retract boundary does not alter parent changesets.
                # if a parent has a higher phase the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1295 1299
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()
1317 1321
1318 1322 def walk(self, match, node=None):
1319 1323 '''
1320 1324 walk recursively through the directory tree or a given
1321 1325 changeset, finding all files matched by the match
1322 1326 function
1323 1327 '''
1324 1328 return self[node].walk(match)
1325 1329
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a tuple of seven sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The ignored/clean/unknown lists are only populated when the
        corresponding flag is set.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to files accepted by 'match'
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    # remove handled files so leftovers in mf1 are 'removed'
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1474 1478
1475 1479 def heads(self, start=None):
1476 1480 heads = self.changelog.heads(start)
1477 1481 # sort the output in rev descending order
1478 1482 return sorted(heads, key=self.changelog.rev, reverse=True)
1479 1483
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            # drop heads whose changeset extra carries a 'close' marker
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads
1503 1507
1504 1508 def branches(self, nodes):
1505 1509 if not nodes:
1506 1510 nodes = [self.changelog.tip()]
1507 1511 b = []
1508 1512 for n in nodes:
1509 1513 t = n
1510 1514 while True:
1511 1515 p = self.changelog.parents(n)
1512 1516 if p[1] != nullid or p[0] == nullid:
1513 1517 b.append((t, n, p[0], p[1]))
1514 1518 break
1515 1519 n = p[0]
1516 1520 return b
1517 1521
1518 1522 def between(self, pairs):
1519 1523 r = []
1520 1524
1521 1525 for top, bottom in pairs:
1522 1526 n, l, i = top, [], 0
1523 1527 f = 1
1524 1528
1525 1529 while n != bottom and n != nullid:
1526 1530 p = self.changelog.parents(n)[0]
1527 1531 if i == f:
1528 1532 l.append(n)
1529 1533 f = f * 2
1530 1534 n = p
1531 1535 i += 1
1532 1536
1533 1537 r.append(l)
1534 1538
1535 1539 return r
1536 1540
    def pull(self, remote, heads=None, force=False):
        """Pull changesets (optionally limited to ``heads``) from ``remote``
        into this repository, then synchronize phase data.

        Returns the result of addchangegroup(), or 0 when there was
        nothing to fetch.
        """
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)
        finally:
            lock.release()

        return result
1597 1601
1598 1602 def checkpush(self, force, revs):
1599 1603 """Extensions can override this function if additional checks have
1600 1604 to be performed before pushing, or call it if they override push
1601 1605 command.
1602 1606 """
1603 1607 pass
1604 1608
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)


                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))"
                    #              )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote by public here.
                    # XXX Beware that revset break if droots is not strictly
                    # XXX root we may want to ensure it is but it is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        # outside the locks: push any bookmarks whose remote position is
        # an ancestor of the local one
        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1755 1759
1756 1760 def changegroupinfo(self, nodes, source):
1757 1761 if self.ui.verbose or source == 'bundle':
1758 1762 self.ui.status(_("%d changesets found\n") % len(nodes))
1759 1763 if self.ui.debugflag:
1760 1764 self.ui.debug("list of changesets:\n")
1761 1765 for node in nodes:
1762 1766 self.ui.debug("%s\n" % hex(node))
1763 1767
    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)
1784 1788
1785 1789 def getlocalbundle(self, source, outgoing):
1786 1790 """Like getbundle, but taking a discovery.outgoing as an argument.
1787 1791
1788 1792 This is only implemented for local repos and reuses potentially
1789 1793 precomputed sets in outgoing."""
1790 1794 if not outgoing.missing:
1791 1795 return None
1792 1796 return self._changegroupsubset(outgoing.common,
1793 1797 outgoing.missing,
1794 1798 outgoing.missingheads,
1795 1799 source)
1796 1800
1797 1801 def getbundle(self, source, heads=None, common=None):
1798 1802 """Like changegroupsubset, but returns the set difference between the
1799 1803 ancestors of heads and the ancestors common.
1800 1804
1801 1805 If heads is None, use the local heads. If common is None, use [nullid].
1802 1806
1803 1807 The nodes in common might not all be known locally due to the way the
1804 1808 current discovery protocol works.
1805 1809 """
1806 1810 cl = self.changelog
1807 1811 if common:
1808 1812 nm = cl.nodemap
1809 1813 common = [n for n in common if n in nm]
1810 1814 else:
1811 1815 common = [nullid]
1812 1816 if not heads:
1813 1817 heads = cl.heads()
1814 1818 return self.getlocalbundle(source,
1815 1819 discovery.outgoing(cl, common, heads))
1816 1820
    def _changegroupsubset(self, commonrevs, csets, heads, source):
        """Build a changegroup for *csets*, excluding everything whose
        linkrev falls inside *commonrevs* (already known to the receiver).

        Returns an unbundle10 object wrapping a lazy generator of
        changelog, manifest and filelog chunks.
        """
        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        # mutable cells shared with the lookup/gengroup closures below
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            # called back by the bundler to map a node to its owning
            # changeset; also accumulates the manifests/filenodes needed
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1922 1926
    def changegroup(self, basenodes, source):
        """Return a changegroup of everything between basenodes and the
        current heads."""
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)
1926 1930
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        # mutable cells shared with the lookup/gengroup closures below
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            # nodes of *log* whose linkrev is among the outgoing revisions
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            # bundler callback mapping a node back to its owning changeset
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            # empty chunk signals the end of the stream
            yield bundler.close()
            progress(_bundling, None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2018 2022
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # linkrev mapper for the incoming changelog group
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            # linkrev mapper for manifest/filelog groups
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                # per-chunk progress callback handed to the unbundler
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    # tick off file nodes we were told to expect
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            # anything left in needfiles must already exist locally,
            # otherwise the incoming data was incomplete
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            # compute the head-count delta, ignoring closed branch heads
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old server can not push the boundary themself.
                # New server won't push the boundary if changeset already
                # existed locally as secrete
                #
                # We should not use added here but the list of all change in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alter behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                # defer hooks until the repo lock is released so they observe
                # a fully committed state (issue3344)
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
2209 2213
    def stream_in(self, remote, requirements):
        """Clone by copying raw store files streamed from *remote*.

        Parses the stream protocol (status line, then "files bytes"
        header, then one "name\\0size" header plus raw data per file),
        writes the files into the local store, and merges the format
        *requirements* of the source into this repo's requirements.
        Raises util.Abort or error.ResponseError on protocol errors.
        """
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                # guard against zero/negative clock deltas in the rate below
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            # drop cached data; it refers to the pre-stream state
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
2270 2274
2271 2275 def clone(self, remote, heads=[], stream=False):
2272 2276 '''clone remote repository.
2273 2277
2274 2278 keyword arguments:
2275 2279 heads: list of revs to clone (forces use of pull)
2276 2280 stream: use streaming clone if possible'''
2277 2281
2278 2282 # now, all clients that can request uncompressed clones can
2279 2283 # read repo formats supported by all servers that can serve
2280 2284 # them.
2281 2285
2282 2286 # if revlog format changes, client will have to check version
2283 2287 # and format flags on "stream" capability, and use
2284 2288 # uncompressed only if compatible.
2285 2289
2286 2290 if not stream:
2287 2291 # if the server explicitely prefer to stream (for fast LANs)
2288 2292 stream = remote.capable('stream-preferred')
2289 2293
2290 2294 if stream and not heads:
2291 2295 # 'stream' means remote revlog format is revlogv1 only
2292 2296 if remote.capable('stream'):
2293 2297 return self.stream_in(remote, set(('revlogv1',)))
2294 2298 # otherwise, 'streamreqs' contains the remote revlog format
2295 2299 streamreqs = remote.capable('streamreqs')
2296 2300 if streamreqs:
2297 2301 streamreqs = set(streamreqs.split(','))
2298 2302 # if we support it, stream in and adjust our requirements
2299 2303 if not streamreqs - self.supportedformats:
2300 2304 return self.stream_in(remote, streamreqs)
2301 2305 return self.pull(remote, heads)
2302 2306
    def pushkey(self, namespace, key, old, new):
        """Update *key* in *namespace* from *old* to *new*.

        Runs the 'prepushkey' hook first (throw=True, so a failing hook
        aborts the update), then the 'pushkey' hook with the result.
        Returns the pushkey module's return value.
        """
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret
2310 2314
    def listkeys(self, namespace):
        """Return the pushkey key/value map for *namespace*.

        Runs the 'prelistkeys' hook first (throw=True, so a failing hook
        aborts the listing), then the 'listkeys' hook with the values.
        """
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
2316 2320
2317 2321 def debugwireargs(self, one, two, three=None, four=None, five=None):
2318 2322 '''used to test argument passing over the wire'''
2319 2323 return "%s %s %s %s %s" % (one, two, three, four, five)
2320 2324
2321 2325 def savecommitmessage(self, text):
2322 2326 fp = self.opener('last-message.txt', 'wb')
2323 2327 try:
2324 2328 fp.write(text)
2325 2329 finally:
2326 2330 fp.close()
2327 2331 return self.pathto(fp.name[len(self.root)+1:])
2328 2332
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in *files*,
    silently skipping sources that do not exist yet."""
    pairs = [tuple(item) for item in files]
    def renameall():
        for source, target in pairs:
            try:
                util.rename(source, target)
            except OSError: # journal file does not yet exist
                pass
    return renameall
2339 2343
def undoname(fn):
    """Map a journal file path to its corresponding undo file path."""
    directory, basename = os.path.split(fn)
    assert basename.startswith('journal')
    undobase = basename.replace('journal', 'undo', 1)
    return os.path.join(directory, undobase)
2344 2348
def instance(ui, path, create):
    # factory: build a localrepository for *path*, normalizing a
    # file:// URL to a plain local filesystem path first
    return localrepository(ui, util.urllocalpath(path), create)
2347 2351
def islocal(path):
    # repo-type predicate: this module only ever backs local repositories
    return True
@@ -1,608 +1,608
1 1 $ "$TESTDIR/hghave" system-sh || exit 80
2 2
3 3 commit hooks can see env vars
4 4
5 5 $ hg init a
6 6 $ cd a
7 7 $ echo "[hooks]" > .hg/hgrc
8 8 $ echo 'commit = unset HG_LOCAL HG_TAG; python "$TESTDIR"/printenv.py commit' >> .hg/hgrc
9 9 $ echo 'commit.b = unset HG_LOCAL HG_TAG; python "$TESTDIR"/printenv.py commit.b' >> .hg/hgrc
10 10 $ echo 'precommit = unset HG_LOCAL HG_NODE HG_TAG; python "$TESTDIR"/printenv.py precommit' >> .hg/hgrc
11 11 $ echo 'pretxncommit = unset HG_LOCAL HG_TAG; python "$TESTDIR"/printenv.py pretxncommit' >> .hg/hgrc
12 12 $ echo 'pretxncommit.tip = hg -q tip' >> .hg/hgrc
13 13 $ echo 'pre-identify = python "$TESTDIR"/printenv.py pre-identify 1' >> .hg/hgrc
14 14 $ echo 'pre-cat = python "$TESTDIR"/printenv.py pre-cat' >> .hg/hgrc
15 15 $ echo 'post-cat = python "$TESTDIR"/printenv.py post-cat' >> .hg/hgrc
16 16 $ echo a > a
17 17 $ hg add a
18 18 $ hg commit -m a
19 19 precommit hook: HG_PARENT1=0000000000000000000000000000000000000000
20 20 pretxncommit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000 HG_PENDING=$TESTTMP/a
21 21 0:cb9a9f314b8b
22 22 commit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
23 23 commit.b hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
24 24
25 25 $ hg clone . ../b
26 26 updating to branch default
27 27 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
28 28 $ cd ../b
29 29
30 30 changegroup hooks can see env vars
31 31
32 32 $ echo '[hooks]' > .hg/hgrc
33 33 $ echo 'prechangegroup = python "$TESTDIR"/printenv.py prechangegroup' >> .hg/hgrc
34 34 $ echo 'changegroup = python "$TESTDIR"/printenv.py changegroup' >> .hg/hgrc
35 35 $ echo 'incoming = python "$TESTDIR"/printenv.py incoming' >> .hg/hgrc
36 36
37 37 pretxncommit and commit hooks can see both parents of merge
38 38
39 39 $ cd ../a
40 40 $ echo b >> a
41 41 $ hg commit -m a1 -d "1 0"
42 42 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
43 43 pretxncommit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
44 44 1:ab228980c14d
45 45 commit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
46 46 commit.b hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
47 47 $ hg update -C 0
48 48 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
49 49 $ echo b > b
50 50 $ hg add b
51 51 $ hg commit -m b -d '1 0'
52 52 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
53 53 pretxncommit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
54 54 2:ee9deb46ab31
55 55 commit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
56 56 commit.b hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
57 57 created new head
58 58 $ hg merge 1
59 59 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
60 60 (branch merge, don't forget to commit)
61 61 $ hg commit -m merge -d '2 0'
62 62 precommit hook: HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
63 63 pretxncommit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd HG_PENDING=$TESTTMP/a
64 64 3:07f3376c1e65
65 65 commit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
66 66 commit.b hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
67 67
68 68 test generic hooks
69 69
70 70 $ hg id
71 71 pre-identify hook: HG_ARGS=id HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None} HG_PATS=[]
72 72 warning: pre-identify hook exited with status 1
73 73 [1]
74 74 $ hg cat b
75 75 pre-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b']
76 76 b
77 77 post-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b'] HG_RESULT=0
78 78
79 79 $ cd ../b
80 80 $ hg pull ../a
81 81 pulling from ../a
82 82 searching for changes
83 83 prechangegroup hook: HG_SOURCE=pull HG_URL=file:$TESTTMP/a
84 84 adding changesets
85 85 adding manifests
86 86 adding file changes
87 87 added 3 changesets with 2 changes to 2 files
88 88 changegroup hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_URL=file:$TESTTMP/a
89 89 incoming hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_URL=file:$TESTTMP/a
90 90 incoming hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_SOURCE=pull HG_URL=file:$TESTTMP/a
91 91 incoming hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_SOURCE=pull HG_URL=file:$TESTTMP/a
92 92 (run 'hg update' to get a working copy)
93 93
94 94 tag hooks can see env vars
95 95
96 96 $ cd ../a
97 97 $ echo 'pretag = python "$TESTDIR"/printenv.py pretag' >> .hg/hgrc
98 98 $ echo 'tag = unset HG_PARENT1 HG_PARENT2; python "$TESTDIR"/printenv.py tag' >> .hg/hgrc
99 99 $ hg tag -d '3 0' a
100 100 pretag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
101 101 precommit hook: HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
102 102 pretxncommit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PENDING=$TESTTMP/a
103 103 4:539e4b31b6dc
104 tag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
104 105 commit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
105 106 commit.b hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
106 tag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
107 107 $ hg tag -l la
108 108 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
109 109 tag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
110 110
111 111 pretag hook can forbid tagging
112 112
113 113 $ echo 'pretag.forbid = python "$TESTDIR"/printenv.py pretag.forbid 1' >> .hg/hgrc
114 114 $ hg tag -d '4 0' fa
115 115 pretag hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
116 116 pretag.forbid hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
117 117 abort: pretag.forbid hook exited with status 1
118 118 [255]
119 119 $ hg tag -l fla
120 120 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
121 121 pretag.forbid hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
122 122 abort: pretag.forbid hook exited with status 1
123 123 [255]
124 124
125 125 pretxncommit hook can see changeset, can roll back txn, changeset no
126 126 more there after
127 127
128 128 $ echo 'pretxncommit.forbid0 = hg tip -q' >> .hg/hgrc
129 129 $ echo 'pretxncommit.forbid1 = python "$TESTDIR"/printenv.py pretxncommit.forbid 1' >> .hg/hgrc
130 130 $ echo z > z
131 131 $ hg add z
132 132 $ hg -q tip
133 133 4:539e4b31b6dc
134 134 $ hg commit -m 'fail' -d '4 0'
135 135 precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
136 136 pretxncommit hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
137 137 5:6f611f8018c1
138 138 5:6f611f8018c1
139 139 pretxncommit.forbid hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
140 140 transaction abort!
141 141 rollback completed
142 142 abort: pretxncommit.forbid1 hook exited with status 1
143 143 [255]
144 144 $ hg -q tip
145 145 4:539e4b31b6dc
146 146
147 147 precommit hook can prevent commit
148 148
149 149 $ echo 'precommit.forbid = python "$TESTDIR"/printenv.py precommit.forbid 1' >> .hg/hgrc
150 150 $ hg commit -m 'fail' -d '4 0'
151 151 precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
152 152 precommit.forbid hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
153 153 abort: precommit.forbid hook exited with status 1
154 154 [255]
155 155 $ hg -q tip
156 156 4:539e4b31b6dc
157 157
158 158 preupdate hook can prevent update
159 159
160 160 $ echo 'preupdate = python "$TESTDIR"/printenv.py preupdate' >> .hg/hgrc
161 161 $ hg update 1
162 162 preupdate hook: HG_PARENT1=ab228980c14d
163 163 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
164 164
165 165 update hook
166 166
167 167 $ echo 'update = python "$TESTDIR"/printenv.py update' >> .hg/hgrc
168 168 $ hg update
169 169 preupdate hook: HG_PARENT1=539e4b31b6dc
170 170 update hook: HG_ERROR=0 HG_PARENT1=539e4b31b6dc
171 171 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
172 172
173 173 pushkey hook
174 174
175 175 $ echo 'pushkey = python "$TESTDIR"/printenv.py pushkey' >> .hg/hgrc
176 176 $ cd ../b
177 177 $ hg bookmark -r null foo
178 178 $ hg push -B foo ../a
179 179 pushing to ../a
180 180 searching for changes
181 181 no changes found
182 182 exporting bookmark foo
183 183 pushkey hook: HG_KEY=foo HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000 HG_RET=1
184 184 [1]
185 185 $ cd ../a
186 186
187 187 listkeys hook
188 188
189 189 $ echo 'listkeys = python "$TESTDIR"/printenv.py listkeys' >> .hg/hgrc
190 190 $ hg bookmark -r null bar
191 191 $ cd ../b
192 192 $ hg pull -B bar ../a
193 193 pulling from ../a
194 194 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
195 195 no changes found
196 196 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
197 197 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
198 198 importing bookmark bar
199 199 $ cd ../a
200 200
201 201 test that prepushkey can prevent incoming keys
202 202
203 203 $ echo 'prepushkey = python "$TESTDIR"/printenv.py prepushkey.forbid 1' >> .hg/hgrc
204 204 $ cd ../b
205 205 $ hg bookmark -r null baz
206 206 $ hg push -B baz ../a
207 207 pushing to ../a
208 208 searching for changes
209 209 no changes found
210 210 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
211 211 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
212 212 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
213 213 exporting bookmark baz
214 214 prepushkey.forbid hook: HG_KEY=baz HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000
215 215 abort: prepushkey hook exited with status 1
216 216 [255]
217 217 $ cd ../a
218 218
219 219 test that prelistkeys can prevent listing keys
220 220
221 221 $ echo 'prelistkeys = python "$TESTDIR"/printenv.py prelistkeys.forbid 1' >> .hg/hgrc
222 222 $ hg bookmark -r null quux
223 223 $ cd ../b
224 224 $ hg pull -B quux ../a
225 225 pulling from ../a
226 226 prelistkeys.forbid hook: HG_NAMESPACE=bookmarks
227 227 abort: prelistkeys hook exited with status 1
228 228 [255]
229 229 $ cd ../a
230 230
231 231 prechangegroup hook can prevent incoming changes
232 232
233 233 $ cd ../b
234 234 $ hg -q tip
235 235 3:07f3376c1e65
236 236 $ echo '[hooks]' > .hg/hgrc
237 237 $ echo 'prechangegroup.forbid = python "$TESTDIR"/printenv.py prechangegroup.forbid 1' >> .hg/hgrc
238 238 $ hg pull ../a
239 239 pulling from ../a
240 240 searching for changes
241 241 prechangegroup.forbid hook: HG_SOURCE=pull HG_URL=file:$TESTTMP/a
242 242 abort: prechangegroup.forbid hook exited with status 1
243 243 [255]
244 244
245 245 pretxnchangegroup hook can see incoming changes, can roll back txn,
246 246 incoming changes no longer there after
247 247
248 248 $ echo '[hooks]' > .hg/hgrc
249 249 $ echo 'pretxnchangegroup.forbid0 = hg tip -q' >> .hg/hgrc
250 250 $ echo 'pretxnchangegroup.forbid1 = python "$TESTDIR"/printenv.py pretxnchangegroup.forbid 1' >> .hg/hgrc
251 251 $ hg pull ../a
252 252 pulling from ../a
253 253 searching for changes
254 254 adding changesets
255 255 adding manifests
256 256 adding file changes
257 257 added 1 changesets with 1 changes to 1 files
258 258 4:539e4b31b6dc
259 259 pretxnchangegroup.forbid hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/b HG_SOURCE=pull HG_URL=file:$TESTTMP/a
260 260 transaction abort!
261 261 rollback completed
262 262 abort: pretxnchangegroup.forbid1 hook exited with status 1
263 263 [255]
264 264 $ hg -q tip
265 265 3:07f3376c1e65
266 266
267 267 outgoing hooks can see env vars
268 268
269 269 $ rm .hg/hgrc
270 270 $ echo '[hooks]' > ../a/.hg/hgrc
271 271 $ echo 'preoutgoing = python "$TESTDIR"/printenv.py preoutgoing' >> ../a/.hg/hgrc
272 272 $ echo 'outgoing = python "$TESTDIR"/printenv.py outgoing' >> ../a/.hg/hgrc
273 273 $ hg pull ../a
274 274 pulling from ../a
275 275 searching for changes
276 276 preoutgoing hook: HG_SOURCE=pull
277 277 adding changesets
278 278 outgoing hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_SOURCE=pull
279 279 adding manifests
280 280 adding file changes
281 281 added 1 changesets with 1 changes to 1 files
282 282 (run 'hg update' to get a working copy)
283 283 $ hg rollback
284 284 repository tip rolled back to revision 3 (undo pull)
285 285
286 286 preoutgoing hook can prevent outgoing changes
287 287
288 288 $ echo 'preoutgoing.forbid = python "$TESTDIR"/printenv.py preoutgoing.forbid 1' >> ../a/.hg/hgrc
289 289 $ hg pull ../a
290 290 pulling from ../a
291 291 searching for changes
292 292 preoutgoing hook: HG_SOURCE=pull
293 293 preoutgoing.forbid hook: HG_SOURCE=pull
294 294 abort: preoutgoing.forbid hook exited with status 1
295 295 [255]
296 296
297 297 outgoing hooks work for local clones
298 298
299 299 $ cd ..
300 300 $ echo '[hooks]' > a/.hg/hgrc
301 301 $ echo 'preoutgoing = python "$TESTDIR"/printenv.py preoutgoing' >> a/.hg/hgrc
302 302 $ echo 'outgoing = python "$TESTDIR"/printenv.py outgoing' >> a/.hg/hgrc
303 303 $ hg clone a c
304 304 preoutgoing hook: HG_SOURCE=clone
305 305 outgoing hook: HG_NODE=0000000000000000000000000000000000000000 HG_SOURCE=clone
306 306 updating to branch default
307 307 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
308 308 $ rm -rf c
309 309
310 310 preoutgoing hook can prevent outgoing changes for local clones
311 311
312 312 $ echo 'preoutgoing.forbid = python "$TESTDIR"/printenv.py preoutgoing.forbid 1' >> a/.hg/hgrc
313 313 $ hg clone a zzz
314 314 preoutgoing hook: HG_SOURCE=clone
315 315 preoutgoing.forbid hook: HG_SOURCE=clone
316 316 abort: preoutgoing.forbid hook exited with status 1
317 317 [255]
318 318 $ cd b
319 319
320 320 $ cat > hooktests.py <<EOF
321 321 > from mercurial import util
322 322 >
323 323 > uncallable = 0
324 324 >
325 325 > def printargs(args):
326 326 > args.pop('ui', None)
327 327 > args.pop('repo', None)
328 328 > a = list(args.items())
329 329 > a.sort()
330 330 > print 'hook args:'
331 331 > for k, v in a:
332 332 > print ' ', k, v
333 333 >
334 334 > def passhook(**args):
335 335 > printargs(args)
336 336 >
337 337 > def failhook(**args):
338 338 > printargs(args)
339 339 > return True
340 340 >
341 341 > class LocalException(Exception):
342 342 > pass
343 343 >
344 344 > def raisehook(**args):
345 345 > raise LocalException('exception from hook')
346 346 >
347 347 > def aborthook(**args):
348 348 > raise util.Abort('raise abort from hook')
349 349 >
350 350 > def brokenhook(**args):
351 351 > return 1 + {}
352 352 >
353 353 > def verbosehook(ui, **args):
354 354 > ui.note('verbose output from hook\n')
355 355 >
356 356 > def printtags(ui, repo, **args):
357 357 > print repo.tags().keys()
358 358 >
359 359 > class container:
360 360 > unreachable = 1
361 361 > EOF
362 362
363 363 test python hooks
364 364
365 365 $ PYTHONPATH="`pwd`:$PYTHONPATH"
366 366 $ export PYTHONPATH
367 367
368 368 $ echo '[hooks]' > ../a/.hg/hgrc
369 369 $ echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
370 370 $ hg pull ../a 2>&1 | grep 'raised an exception'
371 371 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
372 372
373 373 $ echo '[hooks]' > ../a/.hg/hgrc
374 374 $ echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
375 375 $ hg pull ../a 2>&1 | grep 'raised an exception'
376 376 error: preoutgoing.raise hook raised an exception: exception from hook
377 377
378 378 $ echo '[hooks]' > ../a/.hg/hgrc
379 379 $ echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
380 380 $ hg pull ../a
381 381 pulling from ../a
382 382 searching for changes
383 383 error: preoutgoing.abort hook failed: raise abort from hook
384 384 abort: raise abort from hook
385 385 [255]
386 386
387 387 $ echo '[hooks]' > ../a/.hg/hgrc
388 388 $ echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
389 389 $ hg pull ../a
390 390 pulling from ../a
391 391 searching for changes
392 392 hook args:
393 393 hooktype preoutgoing
394 394 source pull
395 395 abort: preoutgoing.fail hook failed
396 396 [255]
397 397
398 398 $ echo '[hooks]' > ../a/.hg/hgrc
399 399 $ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
400 400 $ hg pull ../a
401 401 pulling from ../a
402 402 searching for changes
403 403 abort: preoutgoing.uncallable hook is invalid ("hooktests.uncallable" is not callable)
404 404 [255]
405 405
406 406 $ echo '[hooks]' > ../a/.hg/hgrc
407 407 $ echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
408 408 $ hg pull ../a
409 409 pulling from ../a
410 410 searching for changes
411 411 abort: preoutgoing.nohook hook is invalid ("hooktests.nohook" is not defined)
412 412 [255]
413 413
414 414 $ echo '[hooks]' > ../a/.hg/hgrc
415 415 $ echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
416 416 $ hg pull ../a
417 417 pulling from ../a
418 418 searching for changes
419 419 abort: preoutgoing.nomodule hook is invalid ("nomodule" not in a module)
420 420 [255]
421 421
422 422 $ echo '[hooks]' > ../a/.hg/hgrc
423 423 $ echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
424 424 $ hg pull ../a
425 425 pulling from ../a
426 426 searching for changes
427 427 abort: preoutgoing.badmodule hook is invalid (import of "nomodule" failed)
428 428 [255]
429 429
430 430 $ echo '[hooks]' > ../a/.hg/hgrc
431 431 $ echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
432 432 $ hg pull ../a
433 433 pulling from ../a
434 434 searching for changes
435 435 abort: preoutgoing.unreachable hook is invalid (import of "hooktests.container" failed)
436 436 [255]
437 437
438 438 $ echo '[hooks]' > ../a/.hg/hgrc
439 439 $ echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
440 440 $ hg pull ../a
441 441 pulling from ../a
442 442 searching for changes
443 443 hook args:
444 444 hooktype preoutgoing
445 445 source pull
446 446 adding changesets
447 447 adding manifests
448 448 adding file changes
449 449 added 1 changesets with 1 changes to 1 files
450 450 (run 'hg update' to get a working copy)
451 451
452 452 make sure --traceback works
453 453
454 454 $ echo '[hooks]' > .hg/hgrc
455 455 $ echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc
456 456
457 457 $ echo aa > a
458 458 $ hg --traceback commit -d '0 0' -ma 2>&1 | grep '^Traceback'
459 459 Traceback (most recent call last):
460 460
461 461 $ cd ..
462 462 $ hg init c
463 463 $ cd c
464 464
465 465 $ cat > hookext.py <<EOF
466 466 > def autohook(**args):
467 467 > print "Automatically installed hook"
468 468 >
469 469 > def reposetup(ui, repo):
470 470 > repo.ui.setconfig("hooks", "commit.auto", autohook)
471 471 > EOF
472 472 $ echo '[extensions]' >> .hg/hgrc
473 473 $ echo 'hookext = hookext.py' >> .hg/hgrc
474 474
475 475 $ touch foo
476 476 $ hg add foo
477 477 $ hg ci -d '0 0' -m 'add foo'
478 478 Automatically installed hook
479 479 $ echo >> foo
480 480 $ hg ci --debug -d '0 0' -m 'change foo'
481 481 foo
482 482 calling hook commit.auto: <function autohook at *> (glob)
483 483 Automatically installed hook
484 484 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
485 485
486 486 $ hg showconfig hooks
487 487 hooks.commit.auto=<function autohook at *> (glob)
488 488
489 489 test python hook configured with python:[file]:[hook] syntax
490 490
491 491 $ cd ..
492 492 $ mkdir d
493 493 $ cd d
494 494 $ hg init repo
495 495 $ mkdir hooks
496 496
497 497 $ cd hooks
498 498 $ cat > testhooks.py <<EOF
499 499 > def testhook(**args):
500 500 > print 'hook works'
501 501 > EOF
502 502 $ echo '[hooks]' > ../repo/.hg/hgrc
503 503 $ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
504 504
505 505 $ cd ../repo
506 506 $ hg commit -d '0 0'
507 507 hook works
508 508 nothing changed
509 509 [1]
510 510
511 511 $ cd ../../b
512 512
513 513 make sure --traceback works on hook import failure
514 514
515 515 $ cat > importfail.py <<EOF
516 516 > import somebogusmodule
517 517 > # dereference something in the module to force demandimport to load it
518 518 > somebogusmodule.whatever
519 519 > EOF
520 520
521 521 $ echo '[hooks]' > .hg/hgrc
522 522 $ echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc
523 523
524 524 $ echo a >> a
525 525 $ hg --traceback commit -ma 2>&1 | egrep '^(exception|Traceback|ImportError)'
526 526 exception from first failed import attempt:
527 527 Traceback (most recent call last):
528 528 ImportError: No module named somebogusmodule
529 529 exception from second failed import attempt:
530 530 Traceback (most recent call last):
531 531 ImportError: No module named hgext_importfail
532 532 Traceback (most recent call last):
533 533
534 534 Issue1827: Hooks Update & Commit not completely post operation
535 535
536 536 commit and update hooks should run after command completion
537 537
538 538 $ echo '[hooks]' > .hg/hgrc
539 539 $ echo 'commit = hg id' >> .hg/hgrc
540 540 $ echo 'update = hg id' >> .hg/hgrc
541 541 $ echo bb > a
542 542 $ hg ci -ma
543 543 223eafe2750c tip
544 544 $ hg up 0
545 545 cb9a9f314b8b
546 546 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
547 547
548 548 make sure --verbose (and --quiet/--debug etc.) are propagated to the local ui
549 549 that is passed to pre/post hooks
550 550
551 551 $ echo '[hooks]' > .hg/hgrc
552 552 $ echo 'pre-identify = python:hooktests.verbosehook' >> .hg/hgrc
553 553 $ hg id
554 554 cb9a9f314b8b
555 555 $ hg id --verbose
556 556 calling hook pre-identify: hooktests.verbosehook
557 557 verbose output from hook
558 558 cb9a9f314b8b
559 559
560 560 Ensure hooks can be prioritized
561 561
562 562 $ echo '[hooks]' > .hg/hgrc
563 563 $ echo 'pre-identify.a = python:hooktests.verbosehook' >> .hg/hgrc
564 564 $ echo 'pre-identify.b = python:hooktests.verbosehook' >> .hg/hgrc
565 565 $ echo 'priority.pre-identify.b = 1' >> .hg/hgrc
566 566 $ echo 'pre-identify.c = python:hooktests.verbosehook' >> .hg/hgrc
567 567 $ hg id --verbose
568 568 calling hook pre-identify.b: hooktests.verbosehook
569 569 verbose output from hook
570 570 calling hook pre-identify.a: hooktests.verbosehook
571 571 verbose output from hook
572 572 calling hook pre-identify.c: hooktests.verbosehook
573 573 verbose output from hook
574 574 cb9a9f314b8b
575 575
576 576 new tags must be visible in pretxncommit (issue3210)
577 577
578 578 $ echo 'pretxncommit.printtags = python:hooktests.printtags' >> .hg/hgrc
579 579 $ hg tag -f foo
580 580 ['a', 'foo', 'tip']
581 581
582 582 new commits must be visible in pretxnchangegroup (issue3428)
583 583
584 584 $ cd ..
585 585 $ hg init to
586 586 $ echo '[hooks]' >> to/.hg/hgrc
587 587 $ echo 'pretxnchangegroup = hg --traceback tip' >> to/.hg/hgrc
588 588 $ echo a >> to/a
589 589 $ hg --cwd to ci -Ama
590 590 adding a
591 591 $ hg clone to from
592 592 updating to branch default
593 593 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
594 594 $ echo aa >> from/a
595 595 $ hg --cwd from ci -mb
596 596 $ hg --cwd from push
597 597 pushing to $TESTTMP/to
598 598 searching for changes
599 599 adding changesets
600 600 adding manifests
601 601 adding file changes
602 602 added 1 changesets with 1 changes to 1 files
603 603 changeset: 1:9836a07b9b9d
604 604 tag: tip
605 605 user: test
606 606 date: Thu Jan 01 00:00:00 1970 +0000
607 607 summary: b
608 608
@@ -1,302 +1,315
1 1 $ "$TESTDIR/hghave" system-sh || exit 80
2 2
3 3 $ hg init test
4 4 $ cd test
5 5
6 6 $ echo a > a
7 7 $ hg add a
8 8 $ hg commit -m "test"
9 9 $ hg history
10 10 changeset: 0:acb14030fe0a
11 11 tag: tip
12 12 user: test
13 13 date: Thu Jan 01 00:00:00 1970 +0000
14 14 summary: test
15 15
16 16
17 17 $ hg tag ' '
18 18 abort: tag names cannot consist entirely of whitespace
19 19 [255]
20 20
21 21 $ hg tag "bleah"
22 22 $ hg history
23 23 changeset: 1:d4f0d2909abc
24 24 tag: tip
25 25 user: test
26 26 date: Thu Jan 01 00:00:00 1970 +0000
27 27 summary: Added tag bleah for changeset acb14030fe0a
28 28
29 29 changeset: 0:acb14030fe0a
30 30 tag: bleah
31 31 user: test
32 32 date: Thu Jan 01 00:00:00 1970 +0000
33 33 summary: test
34 34
35 35
36 36 $ echo foo >> .hgtags
37 37 $ hg tag "bleah2"
38 38 abort: working copy of .hgtags is changed (please commit .hgtags manually)
39 39 [255]
40 40
41 41 $ hg revert .hgtags
42 42 $ hg tag -r 0 x y z y y z
43 43 abort: tag names must be unique
44 44 [255]
45 45 $ hg tag tap nada dot tip null .
46 46 abort: the name 'tip' is reserved
47 47 [255]
48 48 $ hg tag "bleah"
49 49 abort: tag 'bleah' already exists (use -f to force)
50 50 [255]
51 51 $ hg tag "blecch" "bleah"
52 52 abort: tag 'bleah' already exists (use -f to force)
53 53 [255]
54 54
55 55 $ hg tag --remove "blecch"
56 56 abort: tag 'blecch' does not exist
57 57 [255]
58 58 $ hg tag --remove "bleah" "blecch" "blough"
59 59 abort: tag 'blecch' does not exist
60 60 [255]
61 61
62 62 $ hg tag -r 0 "bleah0"
63 63 $ hg tag -l -r 1 "bleah1"
64 64 $ hg tag gack gawk gorp
65 65 $ hg tag -f gack
66 66 $ hg tag --remove gack gorp
67 67
68 68 $ hg tag "bleah "
69 69 abort: tag 'bleah' already exists (use -f to force)
70 70 [255]
71 71 $ hg tag " bleah"
72 72 abort: tag 'bleah' already exists (use -f to force)
73 73 [255]
74 74 $ hg tag " bleah"
75 75 abort: tag 'bleah' already exists (use -f to force)
76 76 [255]
77 77 $ hg tag -r 0 " bleahbleah "
78 78 $ hg tag -r 0 " bleah bleah "
79 79
80 80 $ cat .hgtags
81 81 acb14030fe0a21b60322c440ad2d20cf7685a376 bleah
82 82 acb14030fe0a21b60322c440ad2d20cf7685a376 bleah0
83 83 336fccc858a4eb69609a291105009e484a6b6b8d gack
84 84 336fccc858a4eb69609a291105009e484a6b6b8d gawk
85 85 336fccc858a4eb69609a291105009e484a6b6b8d gorp
86 86 336fccc858a4eb69609a291105009e484a6b6b8d gack
87 87 799667b6f2d9b957f73fa644a918c2df22bab58f gack
88 88 799667b6f2d9b957f73fa644a918c2df22bab58f gack
89 89 0000000000000000000000000000000000000000 gack
90 90 336fccc858a4eb69609a291105009e484a6b6b8d gorp
91 91 0000000000000000000000000000000000000000 gorp
92 92 acb14030fe0a21b60322c440ad2d20cf7685a376 bleahbleah
93 93 acb14030fe0a21b60322c440ad2d20cf7685a376 bleah bleah
94 94
95 95 $ cat .hg/localtags
96 96 d4f0d2909abc9290e2773c08837d70c1794e3f5a bleah1
97 97
98 98 tagging on a non-head revision
99 99
100 100 $ hg update 0
101 101 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
102 102 $ hg tag -l localblah
103 103 $ hg tag "foobar"
104 104 abort: not at a branch head (use -f to force)
105 105 [255]
106 106 $ hg tag -f "foobar"
107 107 $ cat .hgtags
108 108 acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
109 109 $ cat .hg/localtags
110 110 d4f0d2909abc9290e2773c08837d70c1794e3f5a bleah1
111 111 acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
112 112
113 113 $ hg tag -l 'xx
114 114 > newline'
115 115 abort: '\n' cannot be used in a tag name
116 116 [255]
117 117 $ hg tag -l 'xx:xx'
118 118 abort: ':' cannot be used in a tag name
119 119 [255]
120 120
121 121 cloning local tags
122 122
123 123 $ cd ..
124 124 $ hg -R test log -r0:5
125 125 changeset: 0:acb14030fe0a
126 126 tag: bleah
127 127 tag: bleah bleah
128 128 tag: bleah0
129 129 tag: bleahbleah
130 130 tag: foobar
131 131 tag: localblah
132 132 user: test
133 133 date: Thu Jan 01 00:00:00 1970 +0000
134 134 summary: test
135 135
136 136 changeset: 1:d4f0d2909abc
137 137 tag: bleah1
138 138 user: test
139 139 date: Thu Jan 01 00:00:00 1970 +0000
140 140 summary: Added tag bleah for changeset acb14030fe0a
141 141
142 142 changeset: 2:336fccc858a4
143 143 tag: gawk
144 144 user: test
145 145 date: Thu Jan 01 00:00:00 1970 +0000
146 146 summary: Added tag bleah0 for changeset acb14030fe0a
147 147
148 148 changeset: 3:799667b6f2d9
149 149 user: test
150 150 date: Thu Jan 01 00:00:00 1970 +0000
151 151 summary: Added tag gack, gawk, gorp for changeset 336fccc858a4
152 152
153 153 changeset: 4:154eeb7c0138
154 154 user: test
155 155 date: Thu Jan 01 00:00:00 1970 +0000
156 156 summary: Added tag gack for changeset 799667b6f2d9
157 157
158 158 changeset: 5:b4bb47aaff09
159 159 user: test
160 160 date: Thu Jan 01 00:00:00 1970 +0000
161 161 summary: Removed tag gack, gorp
162 162
163 163 $ hg clone -q -rbleah1 test test1
164 164 $ hg -R test1 parents --style=compact
165 165 1[tip] d4f0d2909abc 1970-01-01 00:00 +0000 test
166 166 Added tag bleah for changeset acb14030fe0a
167 167
168 168 $ hg clone -q -r5 test#bleah1 test2
169 169 $ hg -R test2 parents --style=compact
170 170 5[tip] b4bb47aaff09 1970-01-01 00:00 +0000 test
171 171 Removed tag gack, gorp
172 172
173 173 $ hg clone -q -U test#bleah1 test3
174 174 $ hg -R test3 parents --style=compact
175 175
176 176 $ cd test
177 177
178 178 Issue601: hg tag doesn't do the right thing if .hgtags or localtags
179 179 doesn't end with EOL
180 180
181 181 $ python << EOF
182 182 > f = file('.hg/localtags'); last = f.readlines()[-1][:-1]; f.close()
183 183 > f = file('.hg/localtags', 'w'); f.write(last); f.close()
184 184 > EOF
185 185 $ cat .hg/localtags; echo
186 186 acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
187 187 $ hg tag -l localnewline
188 188 $ cat .hg/localtags; echo
189 189 acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
190 190 c2899151f4e76890c602a2597a650a72666681bf localnewline
191 191
192 192
193 193 $ python << EOF
194 194 > f = file('.hgtags'); last = f.readlines()[-1][:-1]; f.close()
195 195 > f = file('.hgtags', 'w'); f.write(last); f.close()
196 196 > EOF
197 197 $ hg ci -m'broken manual edit of .hgtags'
198 198 $ cat .hgtags; echo
199 199 acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
200 200 $ hg tag newline
201 201 $ cat .hgtags; echo
202 202 acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
203 203 a0eea09de1eeec777b46f2085260a373b2fbc293 newline
204 204
205 205
206 206 tag and branch using same name
207 207
208 208 $ hg branch tag-and-branch-same-name
209 209 marked working directory as branch tag-and-branch-same-name
210 210 (branches are permanent and global, did you want a bookmark?)
211 211 $ hg ci -m"discouraged"
212 212 $ hg tag tag-and-branch-same-name
213 213 warning: tag tag-and-branch-same-name conflicts with existing branch name
214 214
215 215 test custom commit messages
216 216
217 217 $ cat > editor << '__EOF__'
218 218 > #!/bin/sh
219 219 > echo "custom tag message" > "$1"
220 220 > echo "second line" >> "$1"
221 221 > __EOF__
222 222 $ chmod +x editor
223 223 $ HGEDITOR="'`pwd`'"/editor hg tag custom-tag -e
224 224 $ hg log -l1 --template "{desc}\n"
225 225 custom tag message
226 226 second line
227 227
228 228
229 229 local tag with .hgtags modified
230 230
231 231 $ hg tag hgtags-modified
232 232 $ hg rollback
233 233 repository tip rolled back to revision 13 (undo commit)
234 234 working directory now based on revision 13
235 235 $ hg st
236 236 M .hgtags
237 237 ? .hgtags.orig
238 238 ? editor
239 239 $ hg tag --local baz
240 240 $ hg revert --no-backup .hgtags
241 241
242 242
243 243 tagging when at named-branch-head that's not a topo-head
244 244
245 245 $ hg up default
246 246 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
247 247 $ hg merge -t internal:local
248 248 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
249 249 (branch merge, don't forget to commit)
250 250 $ hg ci -m 'merge named branch'
251 251 $ hg up 13
252 252 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
253 253 $ hg tag new-topo-head
254 254
255 255
256 256 tagging on null rev
257 257
258 258 $ hg up null
259 259 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
260 260 $ hg tag nullrev
261 261 abort: not at a branch head (use -f to force)
262 262 [255]
263 263
264 264 $ hg init empty
265 265 $ hg tag -R empty nullrev
266 266
267 267 $ cd ..
268 268
269 269 tagging on an uncommitted merge (issue2542)
270 270
271 271 $ hg init repo-tag-uncommitted-merge
272 272 $ cd repo-tag-uncommitted-merge
273 273 $ echo c1 > f1
274 274 $ hg ci -Am0
275 275 adding f1
276 276 $ echo c2 > f2
277 277 $ hg ci -Am1
278 278 adding f2
279 279 $ hg co -q 0
280 280 $ hg branch b1
281 281 marked working directory as branch b1
282 282 (branches are permanent and global, did you want a bookmark?)
283 283 $ hg ci -m2
284 284 $ hg up default
285 285 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
286 286 $ hg merge b1
287 287 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
288 288 (branch merge, don't forget to commit)
289 289
290 290 $ hg tag t1
291 291 abort: uncommitted merge
292 292 [255]
293 293 $ hg status
294 294 $ hg tag --rev 1 t2
295 295 abort: uncommitted merge
296 296 [255]
297 297 $ hg tag --rev 1 --local t3
298 298 $ hg tags -v
299 299 tip 2:2a156e8887cc
300 300 t3 1:c3adabd1a5f4 local
301 301
302 302 $ cd ..
303
304 commit hook on tag used to be run without write lock - issue3344
305
306 $ hg init repo-tag
307 $ hg init repo-tag-target
308 $ hg -R repo-tag --config hooks.commit="hg push \"`pwd`/repo-tag-target\"" tag tag
309 pushing to $TESTTMP/repo-tag-target
310 searching for changes
311 adding changesets
312 adding manifests
313 adding file changes
314 added 1 changesets with 1 changes to 1 files
315
General Comments 0
You need to be logged in to leave comments. Login now