##// END OF EJS Templates
hooks: fix hooks not firing if prechangegroup was set (issue4934)...
Durham Goode -
r26859:e7c618ce stable
parent child Browse files
Show More
@@ -1,948 +1,952 b''
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11 import struct
12 12 import tempfile
13 13 import weakref
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullid,
19 19 nullrev,
20 20 short,
21 21 )
22 22
23 23 from . import (
24 24 branchmap,
25 25 dagutil,
26 26 discovery,
27 27 error,
28 28 mdiff,
29 29 phases,
30 30 util,
31 31 )
32 32
33 33 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
34 34 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
35 35
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) == n:
        return data
    raise error.Abort(_("stream ended unexpectedly"
                        " (got %d bytes, expected %d)")
                      % (len(data), n))
44 44
def getchunk(stream):
    """return the next chunk from stream as a string"""
    # each chunk is prefixed by its total length as a big-endian int32
    lengthdata = readexactly(stream, 4)
    length = struct.unpack(">l", lengthdata)[0]
    if length > 4:
        # the 4-byte length field counts toward the declared length
        return readexactly(stream, length - 4)
    if length:
        # 1..4 (or negative) can never frame a valid chunk
        raise error.Abort(_("invalid chunk length %d") % length)
    # a zero length marks the end of a chunk group
    return ""
54 54
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # the on-wire length includes the 4-byte length field itself
    return struct.pack(">l", 4 + length)
58 58
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # a zero-length chunk terminates a chunk group on the wire
    return struct.pack(">l", 0)
62 62
def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one"""
    # Each individual result encodes: 0 = nothing changed, 1 = heads
    # unchanged, 2..n = n-1 heads added, -2..-n = n-1 heads removed.
    delta = 0
    combined = 1
    for outcome in results:
        if outcome == 0:
            # a zero result short-circuits the scan, but any head-count
            # changes accumulated so far still override it below
            combined = 0
            break
        if outcome > 1:
            delta += outcome - 1
        elif outcome < -1:
            delta += outcome + 1
    if delta > 0:
        combined = 1 + delta
    elif delta < 0:
        combined = -1 + delta
    return combined
81 81
# Map of bundle type name -> (on-disk header string, compression code).
bundletypes = {
    # "" is only used when using unbundle on ssh and old http servers:
    # since the unification, ssh accepts a header but there is no
    # capability signaling it.
    "": ("", None),
    "HG20": (),  # special-cased below
    "HG10UN": ("HG10UN", None),
    "HG10BZ": ("HG10", 'BZ'),
    "HG10GZ": ("HG10GZ", 'GZ'),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
94 94
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if not filename:
            # no name supplied: write to a fresh temporary file
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        elif vfs:
            fh = vfs.open(filename, "wb")
        else:
            fh = open(filename, "wb")
        # until the last chunk is written, a failure must delete the file
        cleanup = filename
        for chunk in chunks:
            fh.write(chunk)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
126 126
def writebundle(ui, cg, filename, bundletype, vfs=None, compression=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    if bundletype == "HG20":
        # bundle2 frames and compresses its own payload
        from . import bundle2
        b2 = bundle2.bundle20(ui)
        b2.setcompression(compression)
        part = b2.newpart('changegroup', data=cg.getchunks())
        part.addparam('version', cg.version)
        chunkiter = b2.getchunks()
    else:
        # compression argument is only for the bundle2 case
        assert compression is None
        if cg.version != '01':
            raise error.Abort(_('old bundle types only supports v1 '
                                'changegroups'))
        header, comp = bundletypes[bundletype]
        if comp not in util.compressors:
            raise error.Abort(_('unknown stream compression type: %s')
                              % comp)

        def compressed(payload, compressor):
            # emit the magic header, then the compressed changegroup
            yield header
            for chunk in payload:
                yield compressor.compress(chunk)
            yield compressor.flush()

        chunkiter = compressed(cg.getchunks(), util.compressors[comp]())

    # parse the changegroup data, otherwise we will block
    # in case of sshrepo because we don't know the end of the stream

    # an empty chunkgroup is the end of the changegroup
    # a changegroup has at least 2 chunkgroups (changelog and manifest).
    # after that, an empty chunkgroup is the end of the changegroup
    return writechunks(ui, chunkiter, filename, vfs=vfs)
169 169
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    def __init__(self, fh, alg):
        # fh is the raw (possibly compressed) byte stream; alg is the
        # two-letter compression code used to pick a decompressor.
        if alg == 'UN':
            alg = None # get more modern without breaking too much
        if not alg in util.decompressors:
            raise error.Abort(_('unknown stream compression type: %s')
                             % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'
        self._stream = util.decompressors[alg](fh)
        self._type = alg
        # optional per-chunk progress callback, set by apply()
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        """Read the next chunk's length prefix; return its payload size.

        Returns 0 for the zero-length chunk that terminates a group, and
        aborts on a length that cannot frame a valid chunk."""
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        # the 4-byte length prefix counts toward the declared length
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        # cg1 does not transmit a delta base: the base is implicitly the
        # previous delta in the stream, or p1 for the first delta.
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        """Read one delta chunk; return {} at the end of the group."""
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta}

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    break
                empty = False
                # re-frame the chunk, yielding it in at most 1MB slices
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifest.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)

    def apply(self, repo, srctype, url, emptyok=False,
              targetphase=phases.draft, expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
        # The transaction could have been created before and already
        # carries source information. In this case we use the top
        # level data. We overwrite the argument because we need to use
        # the top level value (if they exist) in this function.
        srctype = tr.hookargs.setdefault('source', srctype)
        url = tr.hookargs.setdefault('url', url)

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = repo.changelog
        cl.delayupdate(tr)
        oldheads = cl.heads()
        try:
            # NOTE: the prechangegroup hook may open its own transaction,
            # which would drop the delayupdate() writepending subscription
            # above; it is re-established after the data is applied (see
            # the second cl.delayupdate(tr) call below, issue4934).
            repo.hook('prechangegroup', throw=True, **tr.hookargs)

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                # incrementing progress callback, invoked once per chunk
                def __init__(self, step, total):
                    self._step = step
                    self._total = total
                    self._count = 1
                def __call__(self):
                    repo.ui.progress(self._step, self._count, unit=_('chunks'),
                                     total=self._total)
                    self._count += 1
            self.callback = prog(_('changesets'), expectedtotal)

            # collect the set of files touched by the incoming changesets,
            # used to size the 'files' progress bar
            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.read(node)[3])

            self.changelogheader()
            srccontent = cl.addgroup(self, csmap, trp,
                                     addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not (srccontent or emptyok):
                raise error.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            repo.ui.progress(_('changesets'), None)

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            self._unpackmanifests(repo, revmap, trp, prog, changesets)

            needfiles = {}
            if repo.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfnode = repo.changelog.read(repo.changelog.node(cset))[0]
                    mfest = repo.manifest.readdelta(mfnode)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            self.callback = None
            pr = prog(_('files'), efiles)
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, pr, needfiles)
            revisions += newrevs
            files += newfiles

            # dh: net head-count delta, ignoring heads that close a branch
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            # Call delayupdate again to ensure the transaction writepending
            # subscriptions are still in place.
            cl.delayupdate(tr)

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                repo.hook('pretxnchangegroup', throw=True, **hookargs)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = repo.publishing()
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if publishing:
                    phases.advanceboundary(repo, tr, phases.public, srccontent)
                else:
                    # Those changesets have been pushed from the outside, their
                    # phases are going to be pushed alongside. Therefor
                    # `targetphase` is ignored.
                    phases.advanceboundary(repo, tr, phases.draft, srccontent)
                    phases.retractboundary(repo, tr, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alter behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(repo, tr, targetphase, added)

            if changesets > 0:
                if srctype != 'strip':
                    # During strip, branchcache is invalid but coming call to
                    # `destroyed` will repair it.
                    # In other case we can safely update cache on disk.
                    branchmap.updatecache(repo.filtered('served'))

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    # forcefully update the on-disk branch cache
                    repo.ui.debug("updating the branch cache\n")
                    repo.hook("changegroup", **hookargs)

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        repo.hook("incoming", **args)

                    newheads = [h for h in repo.heads() if h not in oldheads]
                    repo.ui.log("incoming",
                                "%s incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))

            tr.close()

        finally:
            tr.release()
            repo.ui.flush()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
483 487
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # cg2 headers carry the delta base explicitly as the fourth
        # field, so prevnode is irrelevant; the unpacked tuple already
        # has the (node, p1, p2, deltabase, cs) layout callers expect.
        return headertuple
498 502
class headerlessfixup(object):
    """File-like wrapper that re-prepends already-consumed header bytes.

    Serves the buffered bytes ``h`` first, then falls through to reading
    from the underlying stream ``fh``.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if not self._h:
            # buffer exhausted: read straight from the stream
            return readexactly(self._fh, n)
        d, self._h = self._h[:n], self._h[n:]
        if len(d) < n:
            # buffer only partially covered the request; top up from fh
            d += readexactly(self._fh, n - len(d))
        return d
510 514
class cg1packer(object):
    """Packer producing version '01' changegroup streams from a repo."""
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        # None means "auto" (decide per revlog); otherwise a forced bool.
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        # only emit size notes in verbose (non-debug) mode
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        # terminator chunk for a group
        return closechunk()

    def fileheader(self, fname):
        # filelog groups are introduced by a chunk holding the filename
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas: each rev is delta'd against its predecessor in revs
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        ml = self._repo.manifest
        size = 0
        for chunk in self.group(
                mfnodes, ml, lookuplinknode, units=_('manifests')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (manifests)\n') % size)

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog
        ml = repo.manifest

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            changedfiles.update(c[3])
            # record the first changeset introducing this manifest version
            mfs.setdefault(c[0], x)
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def lookupmflinknode(x):
            clnode = mfs[x]
            if not fastpathlinkrev:
                mdata = ml.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        # record the first changeset introducing this filelog
                        # version
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
            return clnode

        mfnodes = self.prune(ml, mfs, commonrevs)
        for x in self._packmanifests(mfnodes, lookupmflinknode):
            yield x

        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        def linknodes(filerevlog, fname):
            if fastpathlinkrev:
                llr = filerevlog.linkrev
                def genfilenodes():
                    for r in filerevlog:
                        linkrev = llr(r)
                        if linkrev in clrevs:
                            yield filerevlog.node(r), cl.node(linkrev)
                return dict(genfilenodes())
            return fnodes.get(fname, {})

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """Yield filelog groups for every changed file with missing revs."""
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        # cg1 deltas are always against the previous rev in the stream
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        """Yield the chunks (header, meta, delta) for one revision."""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node)
            except error.CensoredNodeError as e:
                # ship the tombstone as a full-replacement delta
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            # no usable base: send the full text with a trivial diff header
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
763 767
class cg2packer(cg1packer):
    """Packer for version '02' streams, adding generaldelta support."""
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        base = revlog.deltaparent(rev)
        # use the stored delta base only when it is a real revision the
        # receiver is sure to have (a parent or the previous rev); avoid
        # storing full revisions, and fall back to prev otherwise
        if base != nullrev and base in (p1, p2, prev):
            return base
        return prev

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # unlike cg1, the delta base node is transmitted explicitly
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
786 790
# changegroup wire format version -> (packer class, unpacker class)
packermap = {'01': (cg1packer, cg1unpacker),
             # cg2 adds support for exchanging generaldelta
             '02': (cg2packer, cg2unpacker),
}
791 795
def _changegroupinfo(repo, nodes, source):
    """Report the outgoing changesets when verbose or bundling."""
    ui = repo.ui
    if not (ui.verbose or source == 'bundle'):
        return
    ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for node in nodes:
            ui.debug("%s\n" % hex(node))
799 803
def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    """Return a raw changegroup chunk generator for outgoing revisions."""
    repo = repo.unfiltered()
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered
    # heads have been requested (since we then know there all linkrevs will
    # be pulled by the client).
    heads.sort()
    uselinkrevfastpath = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, outgoing.missing, source)
    return bundler.generate(outgoing.common, outgoing.missing,
                            uselinkrevfastpath, source)
815 819
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Like getsubsetraw, but wrap the chunk stream in an unpacker."""
    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    unpackerclass = packermap[bundler.version][1]
    return unpackerclass(util.chunkbuffer(gengroup), None)
819 823
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    changelog = repo.changelog
    roots = roots or [nullid]
    # discovery bases: the non-null parents of all roots
    discbases = [p for n in roots
                 for p in changelog.parents(n) if p != nullid]
    # TODO: remove call to nodesbetween.
    csets, roots, heads = changelog.nodesbetween(roots, heads)
    included = set(csets)
    discbases = [n for n in discbases if n not in included]
    outgoing = discovery.outgoing(changelog, discbases, heads)
    bundler = packermap[version][0](repo)
    return getsubset(repo, outgoing, bundler, source)
846 850
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    if not outgoing.missing:
        return None
    packerclass = packermap[version][0]
    return getsubsetraw(repo, outgoing, packerclass(repo, bundlecaps), source)
857 861
def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
                        version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        return None
    packerclass = packermap[version][0]
    return getsubset(repo, outgoing, packerclass(repo, bundlecaps), source)
868 872
def computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        # keep only the common nodes the local repo actually knows about
        common = [n for n in common if cl.hasnode(n)]
    else:
        common = [nullid]
    heads = heads or cl.heads()
    return discovery.outgoing(cl, common, heads)
887 891
def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None,
                   version='01'):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    return getlocalchangegroup(repo, source,
                               computeoutgoing(repo, heads, common),
                               bundlecaps=bundlecaps, version=version)
901 905
def changegroup(repo, basenodes, source):
    # to avoid a race we use changegroupsubset() (issue1320)
    currentheads = repo.heads()
    return changegroupsubset(repo, basenodes, currentheads, source)
905 909
def _addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
    """Apply the filelog portion of a changegroup.

    source is the changegroup unpacker positioned at the filelog groups;
    revmap maps changelog nodes to revs; trp is a transaction proxy; pr
    is a progress callback invoked once per file; needfiles maps
    filename -> set of file nodes that server-side validation requires
    this changegroup to deliver (may be empty).

    Returns (revisions, files): revision and file counts added.
    """
    revisions = 0
    files = 0
    while True:
        # each filelog group is introduced by a header carrying the name;
        # an empty header terminates the file section
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()
        fl = repo.file(f)
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        files += 1
        if f in needfiles:
            # cross off the nodes this group delivered; anything the
            # group added that we did not expect is an error
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    # whatever is still in needfiles must already exist locally,
    # otherwise the changegroup was incomplete
    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
@@ -1,710 +1,717 b''
1 1 commit hooks can see env vars
2 2 (and post-transaction one are run unlocked)
3 3
4 4 $ cat << EOF >> $HGRCPATH
5 5 > [experimental]
6 6 > # drop me once bundle2 is the default,
7 7 > # added to get test change early.
8 8 > bundle2-exp = True
9 9 > EOF
10 10
11 11 $ cat > $TESTTMP/txnabort.checkargs.py <<EOF
12 12 > def showargs(ui, repo, hooktype, **kwargs):
13 13 > ui.write('%s python hook: %s\n' % (hooktype, ','.join(sorted(kwargs))))
14 14 > EOF
15 15
16 16 $ hg init a
17 17 $ cd a
18 18 $ cat > .hg/hgrc <<EOF
19 19 > [hooks]
20 20 > commit = sh -c "HG_LOCAL= HG_TAG= printenv.py commit"
21 21 > commit.b = sh -c "HG_LOCAL= HG_TAG= printenv.py commit.b"
22 22 > precommit = sh -c "HG_LOCAL= HG_NODE= HG_TAG= printenv.py precommit"
23 23 > pretxncommit = sh -c "HG_LOCAL= HG_TAG= printenv.py pretxncommit"
24 24 > pretxncommit.tip = hg -q tip
25 25 > pre-identify = printenv.py pre-identify 1
26 26 > pre-cat = printenv.py pre-cat
27 27 > post-cat = printenv.py post-cat
28 28 > pretxnopen = sh -c "HG_LOCAL= HG_TAG= printenv.py pretxnopen"
29 29 > pretxnclose = sh -c "HG_LOCAL= HG_TAG= printenv.py pretxnclose"
30 30 > txnclose = sh -c "HG_LOCAL= HG_TAG= printenv.py txnclose"
31 31 > txnabort.0 = python:$TESTTMP/txnabort.checkargs.py:showargs
32 32 > txnabort.1 = sh -c "HG_LOCAL= HG_TAG= printenv.py txnabort"
33 33 > txnclose.checklock = sh -c "hg debuglock > /dev/null"
34 34 > EOF
35 35 $ echo a > a
36 36 $ hg add a
37 37 $ hg commit -m a
38 38 precommit hook: HG_PARENT1=0000000000000000000000000000000000000000
39 39 pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
40 40 pretxncommit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000 HG_PENDING=$TESTTMP/a
41 41 0:cb9a9f314b8b
42 42 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
43 43 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
44 44 commit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
45 45 commit.b hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
46 46
47 47 $ hg clone . ../b
48 48 updating to branch default
49 49 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
50 50 $ cd ../b
51 51
52 52 changegroup hooks can see env vars
53 53
54 54 $ cat > .hg/hgrc <<EOF
55 55 > [hooks]
56 56 > prechangegroup = printenv.py prechangegroup
57 57 > changegroup = printenv.py changegroup
58 58 > incoming = printenv.py incoming
59 59 > EOF
60 60
61 61 pretxncommit and commit hooks can see both parents of merge
62 62
63 63 $ cd ../a
64 64 $ echo b >> a
65 65 $ hg commit -m a1 -d "1 0"
66 66 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
67 67 pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
68 68 pretxncommit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
69 69 1:ab228980c14d
70 70 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
71 71 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
72 72 commit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
73 73 commit.b hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
74 74 $ hg update -C 0
75 75 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
76 76 $ echo b > b
77 77 $ hg add b
78 78 $ hg commit -m b -d '1 0'
79 79 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
80 80 pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
81 81 pretxncommit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
82 82 2:ee9deb46ab31
83 83 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
84 84 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
85 85 commit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
86 86 commit.b hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
87 87 created new head
88 88 $ hg merge 1
89 89 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
90 90 (branch merge, don't forget to commit)
91 91 $ hg commit -m merge -d '2 0'
92 92 precommit hook: HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
93 93 pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
94 94 pretxncommit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd HG_PENDING=$TESTTMP/a
95 95 3:07f3376c1e65
96 96 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
97 97 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
98 98 commit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
99 99 commit.b hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
100 100
101 101 test generic hooks
102 102
103 103 $ hg id
104 104 pre-identify hook: HG_ARGS=id HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None} HG_PATS=[]
105 105 abort: pre-identify hook exited with status 1
106 106 [255]
107 107 $ hg cat b
108 108 pre-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b']
109 109 b
110 110 post-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b'] HG_RESULT=0
111 111
112 112 $ cd ../b
113 113 $ hg pull ../a
114 114 pulling from ../a
115 115 searching for changes
116 116 prechangegroup hook: HG_PENDING=$TESTTMP/b HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
117 117 adding changesets
118 118 adding manifests
119 119 adding file changes
120 120 added 3 changesets with 2 changes to 2 files
121 121 changegroup hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
122 122 incoming hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
123 123 incoming hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
124 124 incoming hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
125 125 (run 'hg update' to get a working copy)
126 126
127 127 tag hooks can see env vars
128 128
129 129 $ cd ../a
130 130 $ cat >> .hg/hgrc <<EOF
131 131 > pretag = printenv.py pretag
132 132 > tag = sh -c "HG_PARENT1= HG_PARENT2= printenv.py tag"
133 133 > EOF
134 134 $ hg tag -d '3 0' a
135 135 pretag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
136 136 precommit hook: HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
137 137 pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
138 138 pretxncommit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PENDING=$TESTTMP/a
139 139 4:539e4b31b6dc
140 140 pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
141 141 tag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
142 142 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
143 143 commit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
144 144 commit.b hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
145 145 $ hg tag -l la
146 146 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
147 147 tag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
148 148
149 149 pretag hook can forbid tagging
150 150
151 151 $ echo "pretag.forbid = printenv.py pretag.forbid 1" >> .hg/hgrc
152 152 $ hg tag -d '4 0' fa
153 153 pretag hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
154 154 pretag.forbid hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
155 155 abort: pretag.forbid hook exited with status 1
156 156 [255]
157 157 $ hg tag -l fla
158 158 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
159 159 pretag.forbid hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
160 160 abort: pretag.forbid hook exited with status 1
161 161 [255]
162 162
163 163 pretxncommit hook can see changeset, can roll back txn, changeset no
164 164 more there after
165 165
166 166 $ echo "pretxncommit.forbid0 = hg tip -q" >> .hg/hgrc
167 167 $ echo "pretxncommit.forbid1 = printenv.py pretxncommit.forbid 1" >> .hg/hgrc
168 168 $ echo z > z
169 169 $ hg add z
170 170 $ hg -q tip
171 171 4:539e4b31b6dc
172 172 $ hg commit -m 'fail' -d '4 0'
173 173 precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
174 174 pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
175 175 pretxncommit hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
176 176 5:6f611f8018c1
177 177 5:6f611f8018c1
178 178 pretxncommit.forbid hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
179 179 transaction abort!
180 180 txnabort python hook: txnid,txnname
181 181 txnabort hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
182 182 rollback completed
183 183 abort: pretxncommit.forbid1 hook exited with status 1
184 184 [255]
185 185 $ hg -q tip
186 186 4:539e4b31b6dc
187 187
188 188 (Check that no 'changelog.i.a' file were left behind)
189 189
190 190 $ ls -1 .hg/store/
191 191 00changelog.i
192 192 00manifest.i
193 193 data
194 194 fncache
195 195 journal.phaseroots
196 196 phaseroots
197 197 undo
198 198 undo.backup.fncache
199 199 undo.backupfiles
200 200 undo.phaseroots
201 201
202 202
203 203 precommit hook can prevent commit
204 204
205 205 $ echo "precommit.forbid = printenv.py precommit.forbid 1" >> .hg/hgrc
206 206 $ hg commit -m 'fail' -d '4 0'
207 207 precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
208 208 precommit.forbid hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
209 209 abort: precommit.forbid hook exited with status 1
210 210 [255]
211 211 $ hg -q tip
212 212 4:539e4b31b6dc
213 213
214 214 preupdate hook can prevent update
215 215
216 216 $ echo "preupdate = printenv.py preupdate" >> .hg/hgrc
217 217 $ hg update 1
218 218 preupdate hook: HG_PARENT1=ab228980c14d
219 219 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
220 220
221 221 update hook
222 222
223 223 $ echo "update = printenv.py update" >> .hg/hgrc
224 224 $ hg update
225 225 preupdate hook: HG_PARENT1=539e4b31b6dc
226 226 update hook: HG_ERROR=0 HG_PARENT1=539e4b31b6dc
227 227 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
228 228
229 229 pushkey hook
230 230
231 231 $ echo "pushkey = printenv.py pushkey" >> .hg/hgrc
232 232 $ cd ../b
233 233 $ hg bookmark -r null foo
234 234 $ hg push -B foo ../a
235 235 pushing to ../a
236 236 searching for changes
237 237 no changes found
238 238 pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=push (glob)
239 239 pretxnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_PENDING=$TESTTMP/a HG_SOURCE=push HG_TXNID=TXN:* HG_TXNNAME=push HG_URL=push (glob)
240 240 pushkey hook: HG_KEY=foo HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000 HG_RET=1
241 241 txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_SOURCE=push HG_TXNID=TXN:* HG_TXNNAME=push HG_URL=push (glob)
242 242 exporting bookmark foo
243 243 [1]
244 244 $ cd ../a
245 245
246 246 listkeys hook
247 247
248 248 $ echo "listkeys = printenv.py listkeys" >> .hg/hgrc
249 249 $ hg bookmark -r null bar
250 250 pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
251 251 pretxnclose hook: HG_BOOKMARK_MOVED=1 HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
252 252 txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
253 253 $ cd ../b
254 254 $ hg pull -B bar ../a
255 255 pulling from ../a
256 256 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
257 257 no changes found
258 258 listkeys hook: HG_NAMESPACE=phase HG_VALUES={}
259 259 adding remote bookmark bar
260 260 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
261 261 $ cd ../a
262 262
263 263 test that prepushkey can prevent incoming keys
264 264
265 265 $ echo "prepushkey = printenv.py prepushkey.forbid 1" >> .hg/hgrc
266 266 $ cd ../b
267 267 $ hg bookmark -r null baz
268 268 $ hg push -B baz ../a
269 269 pushing to ../a
270 270 searching for changes
271 271 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
272 272 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
273 273 no changes found
274 274 pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=push (glob)
275 275 prepushkey.forbid hook: HG_BUNDLE2=1 HG_KEY=baz HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000 HG_PENDING=$TESTTMP/a HG_SOURCE=push HG_TXNID=TXN:* HG_URL=push (glob)
276 276 pushkey-abort: prepushkey hook exited with status 1
277 277 abort: exporting bookmark baz failed!
278 278 [255]
279 279 $ cd ../a
280 280
281 281 test that prelistkeys can prevent listing keys
282 282
283 283 $ echo "prelistkeys = printenv.py prelistkeys.forbid 1" >> .hg/hgrc
284 284 $ hg bookmark -r null quux
285 285 pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
286 286 pretxnclose hook: HG_BOOKMARK_MOVED=1 HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
287 287 txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
288 288 $ cd ../b
289 289 $ hg pull -B quux ../a
290 290 pulling from ../a
291 291 prelistkeys.forbid hook: HG_NAMESPACE=bookmarks
292 292 abort: prelistkeys hook exited with status 1
293 293 [255]
294 294 $ cd ../a
295 295 $ rm .hg/hgrc
296 296
297 297 prechangegroup hook can prevent incoming changes
298 298
299 299 $ cd ../b
300 300 $ hg -q tip
301 301 3:07f3376c1e65
302 302 $ cat > .hg/hgrc <<EOF
303 303 > [hooks]
304 304 > prechangegroup.forbid = printenv.py prechangegroup.forbid 1
305 305 > EOF
306 306 $ hg pull ../a
307 307 pulling from ../a
308 308 searching for changes
309 309 prechangegroup.forbid hook: HG_PENDING=$TESTTMP/b HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
310 310 abort: prechangegroup.forbid hook exited with status 1
311 311 [255]
312 312
313 313 pretxnchangegroup hook can see incoming changes, can roll back txn,
314 314 incoming changes no longer there after
315 315
316 316 $ cat > .hg/hgrc <<EOF
317 317 > [hooks]
318 318 > pretxnchangegroup.forbid0 = hg tip -q
319 319 > pretxnchangegroup.forbid1 = printenv.py pretxnchangegroup.forbid 1
320 320 > EOF
321 321 $ hg pull ../a
322 322 pulling from ../a
323 323 searching for changes
324 324 adding changesets
325 325 adding manifests
326 326 adding file changes
327 327 added 1 changesets with 1 changes to 1 files
328 328 4:539e4b31b6dc
329 329 pretxnchangegroup.forbid hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/b HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
330 330 transaction abort!
331 331 rollback completed
332 332 abort: pretxnchangegroup.forbid1 hook exited with status 1
333 333 [255]
334 334 $ hg -q tip
335 335 3:07f3376c1e65
336 336
337 337 outgoing hooks can see env vars
338 338
339 339 $ rm .hg/hgrc
340 340 $ cat > ../a/.hg/hgrc <<EOF
341 341 > [hooks]
342 342 > preoutgoing = printenv.py preoutgoing
343 343 > outgoing = printenv.py outgoing
344 344 > EOF
345 345 $ hg pull ../a
346 346 pulling from ../a
347 347 searching for changes
348 348 preoutgoing hook: HG_SOURCE=pull
349 349 outgoing hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_SOURCE=pull
350 350 adding changesets
351 351 adding manifests
352 352 adding file changes
353 353 added 1 changesets with 1 changes to 1 files
354 354 adding remote bookmark quux
355 355 (run 'hg update' to get a working copy)
356 356 $ hg rollback
357 357 repository tip rolled back to revision 3 (undo pull)
358 358
359 359 preoutgoing hook can prevent outgoing changes
360 360
361 361 $ echo "preoutgoing.forbid = printenv.py preoutgoing.forbid 1" >> ../a/.hg/hgrc
362 362 $ hg pull ../a
363 363 pulling from ../a
364 364 searching for changes
365 365 preoutgoing hook: HG_SOURCE=pull
366 366 preoutgoing.forbid hook: HG_SOURCE=pull
367 367 abort: preoutgoing.forbid hook exited with status 1
368 368 [255]
369 369
370 370 outgoing hooks work for local clones
371 371
372 372 $ cd ..
373 373 $ cat > a/.hg/hgrc <<EOF
374 374 > [hooks]
375 375 > preoutgoing = printenv.py preoutgoing
376 376 > outgoing = printenv.py outgoing
377 377 > EOF
378 378 $ hg clone a c
379 379 preoutgoing hook: HG_SOURCE=clone
380 380 outgoing hook: HG_NODE=0000000000000000000000000000000000000000 HG_SOURCE=clone
381 381 updating to branch default
382 382 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
383 383 $ rm -rf c
384 384
385 385 preoutgoing hook can prevent outgoing changes for local clones
386 386
387 387 $ echo "preoutgoing.forbid = printenv.py preoutgoing.forbid 1" >> a/.hg/hgrc
388 388 $ hg clone a zzz
389 389 preoutgoing hook: HG_SOURCE=clone
390 390 preoutgoing.forbid hook: HG_SOURCE=clone
391 391 abort: preoutgoing.forbid hook exited with status 1
392 392 [255]
393 393
394 394 $ cd "$TESTTMP/b"
395 395
396 396 $ cat > hooktests.py <<EOF
397 397 > from mercurial import error
398 398 >
399 399 > uncallable = 0
400 400 >
401 401 > def printargs(args):
402 402 > args.pop('ui', None)
403 403 > args.pop('repo', None)
404 404 > a = list(args.items())
405 405 > a.sort()
406 406 > print 'hook args:'
407 407 > for k, v in a:
408 408 > print ' ', k, v
409 409 >
410 410 > def passhook(**args):
411 411 > printargs(args)
412 412 >
413 413 > def failhook(**args):
414 414 > printargs(args)
415 415 > return True
416 416 >
417 417 > class LocalException(Exception):
418 418 > pass
419 419 >
420 420 > def raisehook(**args):
421 421 > raise LocalException('exception from hook')
422 422 >
423 423 > def aborthook(**args):
424 424 > raise error.Abort('raise abort from hook')
425 425 >
426 426 > def brokenhook(**args):
427 427 > return 1 + {}
428 428 >
429 429 > def verbosehook(ui, **args):
430 430 > ui.note('verbose output from hook\n')
431 431 >
432 432 > def printtags(ui, repo, **args):
433 433 > print sorted(repo.tags())
434 434 >
435 435 > class container:
436 436 > unreachable = 1
437 437 > EOF
438 438
439 439 test python hooks
440 440
441 441 #if windows
442 442 $ PYTHONPATH="$TESTTMP/b;$PYTHONPATH"
443 443 #else
444 444 $ PYTHONPATH="$TESTTMP/b:$PYTHONPATH"
445 445 #endif
446 446 $ export PYTHONPATH
447 447
448 448 $ echo '[hooks]' > ../a/.hg/hgrc
449 449 $ echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
450 450 $ hg pull ../a 2>&1 | grep 'raised an exception'
451 451 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
452 452
453 453 $ echo '[hooks]' > ../a/.hg/hgrc
454 454 $ echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
455 455 $ hg pull ../a 2>&1 | grep 'raised an exception'
456 456 error: preoutgoing.raise hook raised an exception: exception from hook
457 457
458 458 $ echo '[hooks]' > ../a/.hg/hgrc
459 459 $ echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
460 460 $ hg pull ../a
461 461 pulling from ../a
462 462 searching for changes
463 463 error: preoutgoing.abort hook failed: raise abort from hook
464 464 abort: raise abort from hook
465 465 [255]
466 466
467 467 $ echo '[hooks]' > ../a/.hg/hgrc
468 468 $ echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
469 469 $ hg pull ../a
470 470 pulling from ../a
471 471 searching for changes
472 472 hook args:
473 473 hooktype preoutgoing
474 474 source pull
475 475 abort: preoutgoing.fail hook failed
476 476 [255]
477 477
478 478 $ echo '[hooks]' > ../a/.hg/hgrc
479 479 $ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
480 480 $ hg pull ../a
481 481 pulling from ../a
482 482 searching for changes
483 483 abort: preoutgoing.uncallable hook is invalid ("hooktests.uncallable" is not callable)
484 484 [255]
485 485
486 486 $ echo '[hooks]' > ../a/.hg/hgrc
487 487 $ echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
488 488 $ hg pull ../a
489 489 pulling from ../a
490 490 searching for changes
491 491 abort: preoutgoing.nohook hook is invalid ("hooktests.nohook" is not defined)
492 492 [255]
493 493
494 494 $ echo '[hooks]' > ../a/.hg/hgrc
495 495 $ echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
496 496 $ hg pull ../a
497 497 pulling from ../a
498 498 searching for changes
499 499 abort: preoutgoing.nomodule hook is invalid ("nomodule" not in a module)
500 500 [255]
501 501
502 502 $ echo '[hooks]' > ../a/.hg/hgrc
503 503 $ echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
504 504 $ hg pull ../a
505 505 pulling from ../a
506 506 searching for changes
507 507 abort: preoutgoing.badmodule hook is invalid (import of "nomodule" failed)
508 508 [255]
509 509
510 510 $ echo '[hooks]' > ../a/.hg/hgrc
511 511 $ echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
512 512 $ hg pull ../a
513 513 pulling from ../a
514 514 searching for changes
515 515 abort: preoutgoing.unreachable hook is invalid (import of "hooktests.container" failed)
516 516 [255]
517 517
518 518 $ echo '[hooks]' > ../a/.hg/hgrc
519 519 $ echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
520 520 $ hg pull ../a
521 521 pulling from ../a
522 522 searching for changes
523 523 hook args:
524 524 hooktype preoutgoing
525 525 source pull
526 526 adding changesets
527 527 adding manifests
528 528 adding file changes
529 529 added 1 changesets with 1 changes to 1 files
530 530 adding remote bookmark quux
531 531 (run 'hg update' to get a working copy)
532 532
533 533 make sure --traceback works
534 534
535 535 $ echo '[hooks]' > .hg/hgrc
536 536 $ echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc
537 537
538 538 $ echo aa > a
539 539 $ hg --traceback commit -d '0 0' -ma 2>&1 | grep '^Traceback'
540 540 Traceback (most recent call last):
541 541
542 542 $ cd ..
543 543 $ hg init c
544 544 $ cd c
545 545
546 546 $ cat > hookext.py <<EOF
547 547 > def autohook(**args):
548 548 > print "Automatically installed hook"
549 549 >
550 550 > def reposetup(ui, repo):
551 551 > repo.ui.setconfig("hooks", "commit.auto", autohook)
552 552 > EOF
553 553 $ echo '[extensions]' >> .hg/hgrc
554 554 $ echo 'hookext = hookext.py' >> .hg/hgrc
555 555
556 556 $ touch foo
557 557 $ hg add foo
558 558 $ hg ci -d '0 0' -m 'add foo'
559 559 Automatically installed hook
560 560 $ echo >> foo
561 561 $ hg ci --debug -d '0 0' -m 'change foo'
562 562 committing files:
563 563 foo
564 564 committing manifest
565 565 committing changelog
566 566 calling hook commit.auto: hgext_hookext.autohook
567 567 Automatically installed hook
568 568 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
569 569
570 570 $ hg showconfig hooks
571 571 hooks.commit.auto=<function autohook at *> (glob)
572 572
573 573 test python hook configured with python:[file]:[hook] syntax
574 574
575 575 $ cd ..
576 576 $ mkdir d
577 577 $ cd d
578 578 $ hg init repo
579 579 $ mkdir hooks
580 580
581 581 $ cd hooks
582 582 $ cat > testhooks.py <<EOF
583 583 > def testhook(**args):
584 584 > print 'hook works'
585 585 > EOF
586 586 $ echo '[hooks]' > ../repo/.hg/hgrc
587 587 $ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
588 588
589 589 $ cd ../repo
590 590 $ hg commit -d '0 0'
591 591 hook works
592 592 nothing changed
593 593 [1]
594 594
595 595 $ echo '[hooks]' > .hg/hgrc
596 596 $ echo "update.ne = python:`pwd`/nonexistent.py:testhook" >> .hg/hgrc
597 597 $ echo "pre-identify.npmd = python:`pwd`/:no_python_module_dir" >> .hg/hgrc
598 598
599 599 $ hg up null
600 600 loading update.ne hook failed:
601 601 abort: No such file or directory: $TESTTMP/d/repo/nonexistent.py
602 602 [255]
603 603
604 604 $ hg id
605 605 loading pre-identify.npmd hook failed:
606 606 abort: No module named repo!
607 607 [255]
608 608
609 609 $ cd ../../b
610 610
611 611 make sure --traceback works on hook import failure
612 612
613 613 $ cat > importfail.py <<EOF
614 614 > import somebogusmodule
615 615 > # dereference something in the module to force demandimport to load it
616 616 > somebogusmodule.whatever
617 617 > EOF
618 618
619 619 $ echo '[hooks]' > .hg/hgrc
620 620 $ echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc
621 621
622 622 $ echo a >> a
623 623 $ hg --traceback commit -ma 2>&1 | egrep -v '^( +File| [a-zA-Z(])'
624 624 exception from first failed import attempt:
625 625 Traceback (most recent call last):
626 626 ImportError: No module named somebogusmodule
627 627 exception from second failed import attempt:
628 628 Traceback (most recent call last):
629 629 ImportError: No module named hgext_importfail
630 630 Traceback (most recent call last):
631 631 HookLoadError: precommit.importfail hook is invalid (import of "importfail" failed)
632 632 abort: precommit.importfail hook is invalid (import of "importfail" failed)
633 633
634 634 Issue1827: Hooks Update & Commit not completely post operation
635 635
636 636 commit and update hooks should run after command completion. The largefiles
637 637 use demonstrates a recursive wlock, showing the hook doesn't run until the
638 638 final release (and dirstate flush).
639 639
640 640 $ echo '[hooks]' > .hg/hgrc
641 641 $ echo 'commit = hg id' >> .hg/hgrc
642 642 $ echo 'update = hg id' >> .hg/hgrc
643 643 $ echo bb > a
644 644 $ hg ci -ma
645 645 223eafe2750c tip
646 646 $ hg up 0 --config extensions.largefiles=
647 647 cb9a9f314b8b
648 648 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
649 649
650 650 make sure --verbose (and --quiet/--debug etc.) are propagated to the local ui
651 651 that is passed to pre/post hooks
652 652
653 653 $ echo '[hooks]' > .hg/hgrc
654 654 $ echo 'pre-identify = python:hooktests.verbosehook' >> .hg/hgrc
655 655 $ hg id
656 656 cb9a9f314b8b
657 657 $ hg id --verbose
658 658 calling hook pre-identify: hooktests.verbosehook
659 659 verbose output from hook
660 660 cb9a9f314b8b
661 661
662 662 Ensure hooks can be prioritized
663 663
664 664 $ echo '[hooks]' > .hg/hgrc
665 665 $ echo 'pre-identify.a = python:hooktests.verbosehook' >> .hg/hgrc
666 666 $ echo 'pre-identify.b = python:hooktests.verbosehook' >> .hg/hgrc
667 667 $ echo 'priority.pre-identify.b = 1' >> .hg/hgrc
668 668 $ echo 'pre-identify.c = python:hooktests.verbosehook' >> .hg/hgrc
669 669 $ hg id --verbose
670 670 calling hook pre-identify.b: hooktests.verbosehook
671 671 verbose output from hook
672 672 calling hook pre-identify.a: hooktests.verbosehook
673 673 verbose output from hook
674 674 calling hook pre-identify.c: hooktests.verbosehook
675 675 verbose output from hook
676 676 cb9a9f314b8b
677 677
678 678 new tags must be visible in pretxncommit (issue3210)
679 679
680 680 $ echo 'pretxncommit.printtags = python:hooktests.printtags' >> .hg/hgrc
681 681 $ hg tag -f foo
682 682 ['a', 'foo', 'tip']
683 683
684 684 new commits must be visible in pretxnchangegroup (issue3428)
685 685
686 686 $ cd ..
687 687 $ hg init to
688 688 $ echo '[hooks]' >> to/.hg/hgrc
689 $ echo 'prechangegroup = hg --traceback tip' >> to/.hg/hgrc
689 690 $ echo 'pretxnchangegroup = hg --traceback tip' >> to/.hg/hgrc
690 691 $ echo a >> to/a
691 692 $ hg --cwd to ci -Ama
692 693 adding a
693 694 $ hg clone to from
694 695 updating to branch default
695 696 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
696 697 $ echo aa >> from/a
697 698 $ hg --cwd from ci -mb
698 699 $ hg --cwd from push
699 700 pushing to $TESTTMP/to (glob)
700 701 searching for changes
702 changeset: 0:cb9a9f314b8b
703 tag: tip
704 user: test
705 date: Thu Jan 01 00:00:00 1970 +0000
706 summary: a
707
701 708 adding changesets
702 709 adding manifests
703 710 adding file changes
704 711 added 1 changesets with 1 changes to 1 files
705 712 changeset: 1:9836a07b9b9d
706 713 tag: tip
707 714 user: test
708 715 date: Thu Jan 01 00:00:00 1970 +0000
709 716 summary: b
710 717
General Comments 0
You need to be logged in to leave comments. Login now