phase: add a transaction argument to retractboundary...
Pierre-Yves David
r22070:c1ca4720 default


changegroup.py (modified file; diff truncated)
@@ -1,756 +1,756 @@
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import weakref
9 9 from i18n import _
10 10 from node import nullrev, nullid, hex, short
11 11 import mdiff, util, dagutil
12 12 import struct, os, bz2, zlib, tempfile
13 13 import discovery, error, phases, branchmap
14 14
15 15 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
16 16
17 17 def readexactly(stream, n):
18 18 '''read n bytes from stream.read and abort if fewer were available'''
19 19 s = stream.read(n)
20 20 if len(s) < n:
21 21 raise util.Abort(_("stream ended unexpectedly"
22 22 " (got %d bytes, expected %d)")
23 23 % (len(s), n))
24 24 return s
25 25
26 26 def getchunk(stream):
27 27 """return the next chunk from stream as a string"""
28 28 d = readexactly(stream, 4)
29 29 l = struct.unpack(">l", d)[0]
30 30 if l <= 4:
31 31 if l:
32 32 raise util.Abort(_("invalid chunk length %d") % l)
33 33 return ""
34 34 return readexactly(stream, l - 4)
35 35
36 36 def chunkheader(length):
37 37 """return a changegroup chunk header (string)"""
38 38 return struct.pack(">l", length + 4)
39 39
40 40 def closechunk():
41 41 """return a changegroup chunk header (string) for a zero-length chunk"""
42 42 return struct.pack(">l", 0)
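
These three helpers define the framing used throughout the file: every chunk is a 4-byte big-endian signed length (which counts the length field itself) followed by the payload, and a zero length marks the end of a chunkgroup. A minimal, self-contained round-trip (the helper name is ours, not Mercurial's):

    import struct

    def frame(payload):
        # mirrors chunkheader(): the length includes the 4 header bytes
        return struct.pack(">l", len(payload) + 4) + payload

    blob = frame("hello") + struct.pack(">l", 0)  # one chunk, then a terminator
    l = struct.unpack(">l", blob[:4])[0]
    assert blob[4:l] == "hello"
    assert struct.unpack(">l", blob[l:l + 4])[0] == 0  # what closechunk() emits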
43 43
44 44 class nocompress(object):
45 45 def compress(self, x):
46 46 return x
47 47 def flush(self):
48 48 return ""
49 49
50 50 bundletypes = {
51 51 "": ("", nocompress), # only when using unbundle on ssh and old http servers
52 52 # since the unification ssh accepts a header but there
53 53 # is no capability signaling it.
54 54 "HG10UN": ("HG10UN", nocompress),
55 55 "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
56 56 "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
57 57 }
58 58
59 59 # hgweb uses this list to communicate its preferred type
60 60 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
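
The table maps each bundle type to the header actually written plus a compressor factory. Note that "HG10BZ" maps to the shorter "HG10" header: on the reader side the two magic bytes "BZ" of the bz2 stream complete the six-byte type string, which is why decompressor() below has to re-feed them. A sketch of how writebundle() consumes the table (standalone, stdlib only):

    import bz2, zlib

    # same shape as the bundletypes table above
    types = {
        "HG10UN": ("HG10UN", None),
        "HG10BZ": ("HG10", bz2.BZ2Compressor),
        "HG10GZ": ("HG10GZ", zlib.compressobj),
    }

    header, factory = types["HG10GZ"]
    z = factory()
    bundle = header + z.compress("raw changegroup chunks") + z.flush()
    assert bundle.startswith("HG10GZ")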
61 61
62 62 def writebundle(cg, filename, bundletype, vfs=None):
63 63 """Write a bundle file and return its filename.
64 64
65 65 Existing files will not be overwritten.
66 66 If no filename is specified, a temporary file is created.
67 67 bz2 compression can be turned off.
68 68 The bundle file will be deleted in case of errors.
69 69 """
70 70
71 71 fh = None
72 72 cleanup = None
73 73 try:
74 74 if filename:
75 75 if vfs:
76 76 fh = vfs.open(filename, "wb")
77 77 else:
78 78 fh = open(filename, "wb")
79 79 else:
80 80 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
81 81 fh = os.fdopen(fd, "wb")
82 82 cleanup = filename
83 83
84 84 header, compressor = bundletypes[bundletype]
85 85 fh.write(header)
86 86 z = compressor()
87 87
88 88 # parse the changegroup data, otherwise we will block
89 89 # in case of sshrepo because we don't know the end of the stream
90 90
91 91 # an empty chunkgroup is the end of the changegroup
92 92 # a changegroup has at least 2 chunkgroups (changelog and manifest).
93 93 # after that, an empty chunkgroup is the end of the changegroup
94 94 for chunk in cg.getchunks():
95 95 fh.write(z.compress(chunk))
96 96 fh.write(z.flush())
97 97 cleanup = None
98 98 return filename
99 99 finally:
100 100 if fh is not None:
101 101 fh.close()
102 102 if cleanup is not None:
103 103 if filename and vfs:
104 104 vfs.unlink(cleanup)
105 105 else:
106 106 os.unlink(cleanup)
107 107
108 108 def decompressor(fh, alg):
109 109 if alg == 'UN':
110 110 return fh
111 111 elif alg == 'GZ':
112 112 def generator(f):
113 113 zd = zlib.decompressobj()
114 114 for chunk in util.filechunkiter(f):
115 115 yield zd.decompress(chunk)
116 116 elif alg == 'BZ':
117 117 def generator(f):
118 118 zd = bz2.BZ2Decompressor()
119 119 zd.decompress("BZ")
120 120 for chunk in util.filechunkiter(f, 4096):
121 121 yield zd.decompress(chunk)
122 122 else:
123 123 raise util.Abort("unknown bundle compression '%s'" % alg)
124 124 return util.chunkbuffer(generator(fh))
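
The 'BZ' branch primes its decompressor with the literal bytes "BZ" because those two bytes were already consumed while sniffing the six-byte bundle type ("HG10" header plus "BZ" from the bz2 magic). The same trick in isolation:

    import bz2

    payload = "some changegroup data"
    blob = bz2.compress(payload)   # starts with the magic "BZh..."
    stream = blob[2:]              # pretend "BZ" was eaten during type detection
    zd = bz2.BZ2Decompressor()
    zd.decompress("BZ")            # re-feed the magic, as decompressor() does
    assert zd.decompress(stream) == payload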
125 125
126 126 class unbundle10(object):
127 127 deltaheader = _BUNDLE10_DELTA_HEADER
128 128 deltaheadersize = struct.calcsize(deltaheader)
129 129 def __init__(self, fh, alg):
130 130 self._stream = decompressor(fh, alg)
131 131 self._type = alg
132 132 self.callback = None
133 133 def compressed(self):
134 134 return self._type != 'UN'
135 135 def read(self, l):
136 136 return self._stream.read(l)
137 137 def seek(self, pos):
138 138 return self._stream.seek(pos)
139 139 def tell(self):
140 140 return self._stream.tell()
141 141 def close(self):
142 142 return self._stream.close()
143 143
144 144 def chunklength(self):
145 145 d = readexactly(self._stream, 4)
146 146 l = struct.unpack(">l", d)[0]
147 147 if l <= 4:
148 148 if l:
149 149 raise util.Abort(_("invalid chunk length %d") % l)
150 150 return 0
151 151 if self.callback:
152 152 self.callback()
153 153 return l - 4
154 154
155 155 def changelogheader(self):
156 156 """v10 does not have a changelog header chunk"""
157 157 return {}
158 158
159 159 def manifestheader(self):
160 160 """v10 does not have a manifest header chunk"""
161 161 return {}
162 162
163 163 def filelogheader(self):
164 164 """return the header of the filelogs chunk, v10 only has the filename"""
165 165 l = self.chunklength()
166 166 if not l:
167 167 return {}
168 168 fname = readexactly(self._stream, l)
169 169 return {'filename': fname}
170 170
171 171 def _deltaheader(self, headertuple, prevnode):
172 172 node, p1, p2, cs = headertuple
173 173 if prevnode is None:
174 174 deltabase = p1
175 175 else:
176 176 deltabase = prevnode
177 177 return node, p1, p2, deltabase, cs
178 178
179 179 def deltachunk(self, prevnode):
180 180 l = self.chunklength()
181 181 if not l:
182 182 return {}
183 183 headerdata = readexactly(self._stream, self.deltaheadersize)
184 184 header = struct.unpack(self.deltaheader, headerdata)
185 185 delta = readexactly(self._stream, l - self.deltaheadersize)
186 186 node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
187 187 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
188 188 'deltabase': deltabase, 'delta': delta}
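
Per _BUNDLE10_DELTA_HEADER, every delta chunk starts with four raw 20-byte nodes (node, p1, p2 and the linkrev node unpacked into cs), i.e. 80 bytes, and everything after that is the delta itself. For illustration:

    import struct

    deltaheader = "20s20s20s20s"
    assert struct.calcsize(deltaheader) == 80

    chunk = "N" * 20 + "1" * 20 + "2" * 20 + "L" * 20 + "delta-bytes"
    node, p1, p2, cs = struct.unpack(deltaheader, chunk[:80])
    delta = chunk[80:]
    assert (node[0], cs[0], delta) == ("N", "L", "delta-bytes")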
189 189
190 190 def getchunks(self):
191 191 """returns all the chunks contains in the bundle
192 192
193 193 Used when you need to forward the binary stream to a file or another
194 194 network API. To do so, it parse the changegroup data, otherwise it will
195 195 block in case of sshrepo because it don't know the end of the stream.
196 196 """
197 197 # an empty chunkgroup is the end of the changegroup
198 198 # a changegroup has at least 2 chunkgroups (changelog and manifest).
199 199 # after that, an empty chunkgroup is the end of the changegroup
200 200 empty = False
201 201 count = 0
202 202 while not empty or count <= 2:
203 203 empty = True
204 204 count += 1
205 205 while True:
206 206 chunk = getchunk(self)
207 207 if not chunk:
208 208 break
209 209 empty = False
210 210 yield chunkheader(len(chunk))
211 211 pos = 0
212 212 while pos < len(chunk):
213 213 next = pos + 2**20
214 214 yield chunk[pos:next]
215 215 pos = next
216 216 yield closechunk()
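
The loop condition mirrors the layout described in the comment: the changelog and manifest groups always come first, so an empty group only terminates the stream once at least two groups have been seen. A self-contained parser for the same layout (the function name is ours):

    import io, struct

    def readgroups(data):
        # split a raw stream into chunkgroups; an empty chunk closes a group
        f = io.BytesIO(data)
        groups, current = [], []
        while True:
            hdr = f.read(4)
            if not hdr:
                break
            l = struct.unpack(">l", hdr)[0]
            if l <= 4:
                groups.append(current)
                current = []
            else:
                current.append(f.read(l - 4))
        return groups

    raw = (struct.pack(">l", 7) + b"abc" + struct.pack(">l", 0)  # group: ["abc"]
           + struct.pack(">l", 0))                               # empty group
    assert readgroups(raw) == [[b"abc"], []]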
217 217
218 218 class headerlessfixup(object):
219 219 def __init__(self, fh, h):
220 220 self._h = h
221 221 self._fh = fh
222 222 def read(self, n):
223 223 if self._h:
224 224 d, self._h = self._h[:n], self._h[n:]
225 225 if len(d) < n:
226 226 d += readexactly(self._fh, n - len(d))
227 227 return d
228 228 return readexactly(self._fh, n)
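
headerlessfixup replays header bytes that were consumed while sniffing the stream type, so downstream consumers still see an intact stream. Assuming the class and readexactly() above are in scope:

    import io

    fh = io.BytesIO(b"HG10UN<raw chunks follow>")
    h = fh.read(6)                    # consumed during type detection
    fixed = headerlessfixup(fh, h)
    assert fixed.read(4) == b"HG10"   # served from the saved header
    assert fixed.read(6) == b"UN<raw" # 2 saved bytes spliced with 4 fresh ones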
229 229
230 230 class bundle10(object):
231 231 deltaheader = _BUNDLE10_DELTA_HEADER
232 232 def __init__(self, repo, bundlecaps=None):
233 233 """Given a source repo, construct a bundler.
234 234
235 235 bundlecaps is optional and can be used to specify the set of
236 236 capabilities which can be used to build the bundle.
237 237 """
238 238 # Set of capabilities we can use to build the bundle.
239 239 if bundlecaps is None:
240 240 bundlecaps = set()
241 241 self._bundlecaps = bundlecaps
242 242 self._changelog = repo.changelog
243 243 self._manifest = repo.manifest
244 244 reorder = repo.ui.config('bundle', 'reorder', 'auto')
245 245 if reorder == 'auto':
246 246 reorder = None
247 247 else:
248 248 reorder = util.parsebool(reorder)
249 249 self._repo = repo
250 250 self._reorder = reorder
251 251 self._progress = repo.ui.progress
252 252 def close(self):
253 253 return closechunk()
254 254
255 255 def fileheader(self, fname):
256 256 return chunkheader(len(fname)) + fname
257 257
258 258 def group(self, nodelist, revlog, lookup, units=None, reorder=None):
259 259 """Calculate a delta group, yielding a sequence of changegroup chunks
260 260 (strings).
261 261
262 262 Given a list of changeset revs, return a set of deltas and
263 263 metadata corresponding to nodes. The first delta is
264 264 first parent(nodelist[0]) -> nodelist[0]; the receiver is
265 265 guaranteed to have this parent as it has all history before
266 266 these changesets. In the case where firstparent is nullrev, the
267 267 changegroup starts with a full revision.
268 268
269 269 If units is not None, progress detail will be generated; units specifies
270 270 the type of revlog that is touched (changelog, manifest, etc.).
271 271 """
272 272 # if we don't have any revisions touched by these changesets, bail
273 273 if len(nodelist) == 0:
274 274 yield self.close()
275 275 return
276 276
277 277 # for generaldelta revlogs, we linearize the revs; this will both be
278 278 # much quicker and generate a much smaller bundle
279 279 if (revlog._generaldelta and reorder is not False) or reorder:
280 280 dag = dagutil.revlogdag(revlog)
281 281 revs = set(revlog.rev(n) for n in nodelist)
282 282 revs = dag.linearize(revs)
283 283 else:
284 284 revs = sorted([revlog.rev(n) for n in nodelist])
285 285
286 286 # add the parent of the first rev
287 287 p = revlog.parentrevs(revs[0])[0]
288 288 revs.insert(0, p)
289 289
290 290 # build deltas
291 291 total = len(revs) - 1
292 292 msgbundling = _('bundling')
293 293 for r in xrange(len(revs) - 1):
294 294 if units is not None:
295 295 self._progress(msgbundling, r + 1, unit=units, total=total)
296 296 prev, curr = revs[r], revs[r + 1]
297 297 linknode = lookup(revlog.node(curr))
298 298 for c in self.revchunk(revlog, curr, prev, linknode):
299 299 yield c
300 300
301 301 yield self.close()
302 302
303 303 # filter any nodes that claim to be part of the known set
304 304 def prune(self, revlog, missing, commonrevs, source):
305 305 rr, rl = revlog.rev, revlog.linkrev
306 306 return [n for n in missing if rl(rr(n)) not in commonrevs]
307 307
308 308 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
309 309 '''yield a sequence of changegroup chunks (strings)'''
310 310 repo = self._repo
311 311 cl = self._changelog
312 312 mf = self._manifest
313 313 reorder = self._reorder
314 314 progress = self._progress
315 315
316 316 # for progress output
317 317 msgbundling = _('bundling')
318 318
319 319 mfs = {} # needed manifests
320 320 fnodes = {} # needed file nodes
321 321 changedfiles = set()
322 322
323 323 # Callback for the changelog, used to collect changed files and manifest
324 324 # nodes.
325 325 # Returns the linkrev node (identity in the changelog case).
326 326 def lookupcl(x):
327 327 c = cl.read(x)
328 328 changedfiles.update(c[3])
329 329 # record the first changeset introducing this manifest version
330 330 mfs.setdefault(c[0], x)
331 331 return x
332 332
333 333 # Callback for the manifest, used to collect linkrevs for filelog
334 334 # revisions.
335 335 # Returns the linkrev node (collected in lookupcl).
336 336 def lookupmf(x):
337 337 clnode = mfs[x]
338 338 if not fastpathlinkrev:
339 339 mdata = mf.readfast(x)
340 340 for f, n in mdata.iteritems():
341 341 if f in changedfiles:
342 342 # record the first changeset introducing this filelog
343 343 # version
344 344 fnodes[f].setdefault(n, clnode)
345 345 return clnode
346 346
347 347 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
348 348 reorder=reorder):
349 349 yield chunk
350 350 progress(msgbundling, None)
351 351
352 352 for f in changedfiles:
353 353 fnodes[f] = {}
354 354 mfnodes = self.prune(mf, mfs, commonrevs, source)
355 355 for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
356 356 reorder=reorder):
357 357 yield chunk
358 358 progress(msgbundling, None)
359 359
360 360 mfs.clear()
361 361 needed = set(cl.rev(x) for x in clnodes)
362 362
363 363 def linknodes(filerevlog, fname):
364 364 if fastpathlinkrev:
365 365 llr = filerevlog.linkrev
366 366 def genfilenodes():
367 367 for r in filerevlog:
368 368 linkrev = llr(r)
369 369 if linkrev in needed:
370 370 yield filerevlog.node(r), cl.node(linkrev)
371 371 fnodes[fname] = dict(genfilenodes())
372 372 return fnodes.get(fname, {})
373 373
374 374 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
375 375 source):
376 376 yield chunk
377 377
378 378 yield self.close()
379 379 progress(msgbundling, None)
380 380
381 381 if clnodes:
382 382 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
383 383
384 384 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
385 385 repo = self._repo
386 386 progress = self._progress
387 387 reorder = self._reorder
388 388 msgbundling = _('bundling')
389 389
390 390 total = len(changedfiles)
391 391 # for progress output
392 392 msgfiles = _('files')
393 393 for i, fname in enumerate(sorted(changedfiles)):
394 394 filerevlog = repo.file(fname)
395 395 if not filerevlog:
396 396 raise util.Abort(_("empty or missing revlog for %s") % fname)
397 397
398 398 linkrevnodes = linknodes(filerevlog, fname)
399 399 # Lookup table for filenodes; we collected the linkrev nodes above in
400 400 # the fastpath case and with lookupmf in the slowpath case.
401 401 def lookupfilelog(x):
402 402 return linkrevnodes[x]
403 403
404 404 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
405 405 if filenodes:
406 406 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
407 407 total=total)
408 408 yield self.fileheader(fname)
409 409 for chunk in self.group(filenodes, filerevlog, lookupfilelog,
410 410 reorder=reorder):
411 411 yield chunk
412 412
413 413 def revchunk(self, revlog, rev, prev, linknode):
414 414 node = revlog.node(rev)
415 415 p1, p2 = revlog.parentrevs(rev)
416 416 base = prev
417 417
418 418 prefix = ''
419 419 if base == nullrev:
420 420 delta = revlog.revision(node)
421 421 prefix = mdiff.trivialdiffheader(len(delta))
422 422 else:
423 423 delta = revlog.revdiff(base, rev)
424 424 p1n, p2n = revlog.parents(node)
425 425 basenode = revlog.node(base)
426 426 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
427 427 meta += prefix
428 428 l = len(meta) + len(delta)
429 429 yield chunkheader(l)
430 430 yield meta
431 431 yield delta
432 432 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
433 433 # do nothing with basenode, it is implicitly the previous one in HG10
434 434 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
435 435
436 436 def _changegroupinfo(repo, nodes, source):
437 437 if repo.ui.verbose or source == 'bundle':
438 438 repo.ui.status(_("%d changesets found\n") % len(nodes))
439 439 if repo.ui.debugflag:
440 440 repo.ui.debug("list of changesets:\n")
441 441 for node in nodes:
442 442 repo.ui.debug("%s\n" % hex(node))
443 443
444 444 def getsubset(repo, outgoing, bundler, source, fastpath=False):
445 445 repo = repo.unfiltered()
446 446 commonrevs = outgoing.common
447 447 csets = outgoing.missing
448 448 heads = outgoing.missingheads
449 449 # We go through the fast path if we are told to, or if all (unfiltered)
450 450 # heads have been requested (since we then know that all linkrevs will
451 451 # be pulled by the client).
452 452 heads.sort()
453 453 fastpathlinkrev = fastpath or (
454 454 repo.filtername is None and heads == sorted(repo.heads()))
455 455
456 456 repo.hook('preoutgoing', throw=True, source=source)
457 457 _changegroupinfo(repo, csets, source)
458 458 gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
459 459 return unbundle10(util.chunkbuffer(gengroup), 'UN')
460 460
461 461 def changegroupsubset(repo, roots, heads, source):
462 462 """Compute a changegroup consisting of all the nodes that are
463 463 descendants of any of the roots and ancestors of any of the heads.
464 464 Return a chunkbuffer object whose read() method will return
465 465 successive changegroup chunks.
466 466
467 467 It is fairly complex as determining which filenodes and which
468 468 manifest nodes need to be included for the changeset to be complete
469 469 is non-trivial.
470 470
471 471 Another wrinkle is doing the reverse, figuring out which changeset in
472 472 the changegroup a particular filenode or manifestnode belongs to.
473 473 """
474 474 cl = repo.changelog
475 475 if not roots:
476 476 roots = [nullid]
477 477 # TODO: remove call to nodesbetween.
478 478 csets, roots, heads = cl.nodesbetween(roots, heads)
479 479 discbases = []
480 480 for n in roots:
481 481 discbases.extend([p for p in cl.parents(n) if p != nullid])
482 482 outgoing = discovery.outgoing(cl, discbases, heads)
483 483 bundler = bundle10(repo)
484 484 return getsubset(repo, outgoing, bundler, source)
485 485
486 486 def getlocalbundle(repo, source, outgoing, bundlecaps=None):
487 487 """Like getbundle, but taking a discovery.outgoing as an argument.
488 488
489 489 This is only implemented for local repos and reuses potentially
490 490 precomputed sets in outgoing."""
491 491 if not outgoing.missing:
492 492 return None
493 493 bundler = bundle10(repo, bundlecaps)
494 494 return getsubset(repo, outgoing, bundler, source)
495 495
496 496 def _computeoutgoing(repo, heads, common):
497 497 """Computes which revs are outgoing given a set of common
498 498 and a set of heads.
499 499
500 500 This is a separate function so extensions can have access to
501 501 the logic.
502 502
503 503 Returns a discovery.outgoing object.
504 504 """
505 505 cl = repo.changelog
506 506 if common:
507 507 hasnode = cl.hasnode
508 508 common = [n for n in common if hasnode(n)]
509 509 else:
510 510 common = [nullid]
511 511 if not heads:
512 512 heads = cl.heads()
513 513 return discovery.outgoing(cl, common, heads)
514 514
515 515 def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
516 516 """Like changegroupsubset, but returns the set difference between the
517 517 ancestors of heads and the ancestors of common.
518 518
519 519 If heads is None, use the local heads. If common is None, use [nullid].
520 520
521 521 The nodes in common might not all be known locally due to the way the
522 522 current discovery protocol works.
523 523 """
524 524 outgoing = _computeoutgoing(repo, heads, common)
525 525 return getlocalbundle(repo, source, outgoing, bundlecaps=bundlecaps)
526 526
527 527 def changegroup(repo, basenodes, source):
528 528 # to avoid a race we use changegroupsubset() (issue1320)
529 529 return changegroupsubset(repo, basenodes, repo.heads(), source)
530 530
531 531 def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
532 532 revisions = 0
533 533 files = 0
534 534 while True:
535 535 chunkdata = source.filelogheader()
536 536 if not chunkdata:
537 537 break
538 538 f = chunkdata["filename"]
539 539 repo.ui.debug("adding %s revisions\n" % f)
540 540 pr()
541 541 fl = repo.file(f)
542 542 o = len(fl)
543 543 if not fl.addgroup(source, revmap, trp):
544 544 raise util.Abort(_("received file revlog group is empty"))
545 545 revisions += len(fl) - o
546 546 files += 1
547 547 if f in needfiles:
548 548 needs = needfiles[f]
549 549 for new in xrange(o, len(fl)):
550 550 n = fl.node(new)
551 551 if n in needs:
552 552 needs.remove(n)
553 553 else:
554 554 raise util.Abort(
555 555 _("received spurious file revlog entry"))
556 556 if not needs:
557 557 del needfiles[f]
558 558 repo.ui.progress(_('files'), None)
559 559
560 560 for f, needs in needfiles.iteritems():
561 561 fl = repo.file(f)
562 562 for n in needs:
563 563 try:
564 564 fl.rev(n)
565 565 except error.LookupError:
566 566 raise util.Abort(
567 567 _('missing file data for %s:%s - run hg verify') %
568 568 (f, hex(n)))
569 569
570 570 return revisions, files
571 571
572 572 def addchangegroup(repo, source, srctype, url, emptyok=False,
573 573 targetphase=phases.draft):
574 574 """Add the changegroup returned by source.read() to this repo.
575 575 srctype is a string like 'push', 'pull', or 'unbundle'. url is
576 576 the URL of the repo where this changegroup is coming from.
577 577
578 578 Return an integer summarizing the change to this repo:
579 579 - nothing changed or no source: 0
580 580 - more heads than before: 1+added heads (2..n)
581 581 - fewer heads than before: -1-removed heads (-2..-n)
582 582 - number of heads stays the same: 1
583 583 """
584 584 repo = repo.unfiltered()
585 585 def csmap(x):
586 586 repo.ui.debug("add changeset %s\n" % short(x))
587 587 return len(cl)
588 588
589 589 def revmap(x):
590 590 return cl.rev(x)
591 591
592 592 if not source:
593 593 return 0
594 594
595 595 repo.hook('prechangegroup', throw=True, source=srctype, url=url)
596 596
597 597 changesets = files = revisions = 0
598 598 efiles = set()
599 599
600 600 # write changelog data to temp files so concurrent readers will not see
601 601 # an inconsistent view
602 602 cl = repo.changelog
603 603 cl.delayupdate()
604 604 oldheads = cl.heads()
605 605
606 606 tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
607 607 try:
608 608 trp = weakref.proxy(tr)
609 609 # pull off the changeset group
610 610 repo.ui.status(_("adding changesets\n"))
611 611 clstart = len(cl)
612 612 class prog(object):
613 613 step = _('changesets')
614 614 count = 1
615 615 ui = repo.ui
616 616 total = None
617 617 def __call__(repo):
618 618 repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
619 619 total=repo.total)
620 620 repo.count += 1
621 621 pr = prog()
622 622 source.callback = pr
623 623
624 624 source.changelogheader()
625 625 srccontent = cl.addgroup(source, csmap, trp)
626 626 if not (srccontent or emptyok):
627 627 raise util.Abort(_("received changelog group is empty"))
628 628 clend = len(cl)
629 629 changesets = clend - clstart
630 630 for c in xrange(clstart, clend):
631 631 efiles.update(repo[c].files())
632 632 efiles = len(efiles)
633 633 repo.ui.progress(_('changesets'), None)
634 634
635 635 # pull off the manifest group
636 636 repo.ui.status(_("adding manifests\n"))
637 637 pr.step = _('manifests')
638 638 pr.count = 1
639 639 pr.total = changesets # manifests <= changesets
640 640 # no need to check for empty manifest group here:
641 641 # if the result of the merge of 1 and 2 is the same in 3 and 4,
642 642 # no new manifest will be created and the manifest group will
643 643 # be empty during the pull
644 644 source.manifestheader()
645 645 repo.manifest.addgroup(source, revmap, trp)
646 646 repo.ui.progress(_('manifests'), None)
647 647
648 648 needfiles = {}
649 649 if repo.ui.configbool('server', 'validate', default=False):
650 650 # validate incoming csets have their manifests
651 651 for cset in xrange(clstart, clend):
652 652 mfest = repo.changelog.read(repo.changelog.node(cset))[0]
653 653 mfest = repo.manifest.readdelta(mfest)
654 654 # store file nodes we must see
655 655 for f, n in mfest.iteritems():
656 656 needfiles.setdefault(f, set()).add(n)
657 657
658 658 # process the files
659 659 repo.ui.status(_("adding file changes\n"))
660 660 pr.step = _('files')
661 661 pr.count = 1
662 662 pr.total = efiles
663 663 source.callback = None
664 664
665 665 newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
666 666 needfiles)
667 667 revisions += newrevs
668 668 files += newfiles
669 669
670 670 dh = 0
671 671 if oldheads:
672 672 heads = cl.heads()
673 673 dh = len(heads) - len(oldheads)
674 674 for h in heads:
675 675 if h not in oldheads and repo[h].closesbranch():
676 676 dh -= 1
677 677 htext = ""
678 678 if dh:
679 679 htext = _(" (%+d heads)") % dh
680 680
681 681 repo.ui.status(_("added %d changesets"
682 682 " with %d changes to %d files%s\n")
683 683 % (changesets, revisions, files, htext))
684 684 repo.invalidatevolatilesets()
685 685
686 686 if changesets > 0:
687 687 p = lambda: cl.writepending() and repo.root or ""
688 688 if 'node' not in tr.hookargs:
689 689 tr.hookargs['node'] = hex(cl.node(clstart))
690 690 repo.hook('pretxnchangegroup', throw=True, source=srctype,
691 691 url=url, pending=p, **tr.hookargs)
692 692
693 693 added = [cl.node(r) for r in xrange(clstart, clend)]
694 694 publishing = repo.ui.configbool('phases', 'publish', True)
695 695 if srctype in ('push', 'serve'):
696 696 # Old servers cannot push the boundary themselves.
697 697 # New servers won't push the boundary if the changeset already
698 698 # exists locally as secret.
699 699 #
700 700 # We should not use `added` here but the list of all changes in
701 701 # the bundle.
702 702 if publishing:
703 703 phases.advanceboundary(repo, tr, phases.public, srccontent)
704 704 else:
705 705 # Those changesets have been pushed from the outside, their
706 706 # phases are going to be pushed alongside. Therefore
707 707 # `targetphase` is ignored.
708 708 phases.advanceboundary(repo, tr, phases.draft, srccontent)
709 phases.retractboundary(repo, phases.draft, added)
709 phases.retractboundary(repo, tr, phases.draft, added)
710 710 elif srctype != 'strip':
711 711 # publishing only alters behavior during push
712 712 #
713 713 # strip should not touch boundary at all
714 phases.retractboundary(repo, targetphase, added)
714 phases.retractboundary(repo, tr, targetphase, added)
715 715
716 716 # make changelog see real files again
717 717 cl.finalize(trp)
718 718
719 719 tr.close()
720 720
721 721 if changesets > 0:
722 722 if srctype != 'strip':
723 723 # During strip, the branchcache is invalid, but the coming call
724 724 # to `destroyed` will repair it.
725 725 # In other cases we can safely update the cache on disk.
726 726 branchmap.updatecache(repo.filtered('served'))
727 727 def runhooks():
728 728 # These hooks run when the lock releases, not when the
729 729 # transaction closes. So it's possible for the changelog
730 730 # to have changed since we last saw it.
731 731 if clstart >= len(repo):
732 732 return
733 733
734 734 # forcefully update the on-disk branch cache
735 735 repo.ui.debug("updating the branch cache\n")
736 736 repo.hook("changegroup", source=srctype, url=url,
737 737 **tr.hookargs)
738 738
739 739 for n in added:
740 740 repo.hook("incoming", node=hex(n), source=srctype,
741 741 url=url)
742 742
743 743 newheads = [h for h in repo.heads() if h not in oldheads]
744 744 repo.ui.log("incoming",
745 745 "%s incoming changes - new heads: %s\n",
746 746 len(added),
747 747 ', '.join([hex(c[:6]) for c in newheads]))
748 748 repo._afterlock(runhooks)
749 749
750 750 finally:
751 751 tr.release()
752 752 # never return 0 here:
753 753 if dh < 0:
754 754 return dh - 1
755 755 else:
756 756 return dh + 1
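
The two retractboundary call sites above are the point of this changeset: both now receive the transaction opened at the top of addchangegroup, so the phase movement is journaled together with the changegroup itself. A minimal sketch of the new calling convention (assuming repo, phases, and an `added` list are in scope):

    lock = repo.lock()
    tr = repo.transaction("unbundle")
    try:
        # ... add changesets, manifests and files ...
        phases.retractboundary(repo, tr, phases.draft, added)  # tr now required
        tr.close()
    finally:
        tr.release()
        lock.release()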
localrepo.py (modified file; diff truncated)
@@ -1,1783 +1,1783 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 propertycache = util.propertycache
22 22 filecache = scmutil.filecache
23 23
24 24 class repofilecache(filecache):
25 25 """All filecache usage on repo are done for logic that should be unfiltered
26 26 """
27 27
28 28 def __get__(self, repo, type=None):
29 29 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 30 def __set__(self, repo, value):
31 31 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 32 def __delete__(self, repo):
33 33 return super(repofilecache, self).__delete__(repo.unfiltered())
34 34
35 35 class storecache(repofilecache):
36 36 """filecache for files in the store"""
37 37 def join(self, obj, fname):
38 38 return obj.sjoin(fname)
39 39
40 40 class unfilteredpropertycache(propertycache):
41 41 """propertycache that apply to unfiltered repo only"""
42 42
43 43 def __get__(self, repo, type=None):
44 44 unfi = repo.unfiltered()
45 45 if unfi is repo:
46 46 return super(unfilteredpropertycache, self).__get__(unfi)
47 47 return getattr(unfi, self.name)
48 48
49 49 class filteredpropertycache(propertycache):
50 50 """propertycache that must take filtering in account"""
51 51
52 52 def cachevalue(self, obj, value):
53 53 object.__setattr__(obj, self.name, value)
54 54
55 55
56 56 def hasunfilteredcache(repo, name):
57 57 """check if a repo has an unfilteredpropertycache value for <name>"""
58 58 return name in vars(repo.unfiltered())
59 59
60 60 def unfilteredmethod(orig):
61 61 """decorate method that always need to be run on unfiltered version"""
62 62 def wrapper(repo, *args, **kwargs):
63 63 return orig(repo.unfiltered(), *args, **kwargs)
64 64 return wrapper
65 65
66 66 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 67 'unbundle'))
68 68 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 69
70 70 class localpeer(peer.peerrepository):
71 71 '''peer for a local repo; reflects only the most recent API'''
72 72
73 73 def __init__(self, repo, caps=moderncaps):
74 74 peer.peerrepository.__init__(self)
75 75 self._repo = repo.filtered('served')
76 76 self.ui = repo.ui
77 77 self._caps = repo._restrictcapabilities(caps)
78 78 self.requirements = repo.requirements
79 79 self.supportedformats = repo.supportedformats
80 80
81 81 def close(self):
82 82 self._repo.close()
83 83
84 84 def _capabilities(self):
85 85 return self._caps
86 86
87 87 def local(self):
88 88 return self._repo
89 89
90 90 def canpush(self):
91 91 return True
92 92
93 93 def url(self):
94 94 return self._repo.url()
95 95
96 96 def lookup(self, key):
97 97 return self._repo.lookup(key)
98 98
99 99 def branchmap(self):
100 100 return self._repo.branchmap()
101 101
102 102 def heads(self):
103 103 return self._repo.heads()
104 104
105 105 def known(self, nodes):
106 106 return self._repo.known(nodes)
107 107
108 108 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 109 format='HG10', **kwargs):
110 110 cg = exchange.getbundle(self._repo, source, heads=heads,
111 111 common=common, bundlecaps=bundlecaps, **kwargs)
112 112 if bundlecaps is not None and 'HG2X' in bundlecaps:
113 113 # When requesting a bundle2, getbundle returns a stream to make the
114 114 # wire level function happier. We need to build a proper object
115 115 # from it in local peer.
116 116 cg = bundle2.unbundle20(self.ui, cg)
117 117 return cg
118 118
119 119 # TODO We might want to move the next two calls into legacypeer and add
120 120 # unbundle instead.
121 121
122 122 def unbundle(self, cg, heads, url):
123 123 """apply a bundle on a repo
124 124
125 125 This function handles the repo locking itself."""
126 126 try:
127 127 cg = exchange.readbundle(self.ui, cg, None)
128 128 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
129 129 if util.safehasattr(ret, 'getchunks'):
130 130 # This is a bundle20 object, turn it into an unbundler.
131 131 # This little dance should be dropped eventually when the API
132 132 # is finally improved.
133 133 stream = util.chunkbuffer(ret.getchunks())
134 134 ret = bundle2.unbundle20(self.ui, stream)
135 135 return ret
136 136 except error.PushRaced, exc:
137 137 raise error.ResponseError(_('push failed:'), str(exc))
138 138
139 139 def lock(self):
140 140 return self._repo.lock()
141 141
142 142 def addchangegroup(self, cg, source, url):
143 143 return changegroup.addchangegroup(self._repo, cg, source, url)
144 144
145 145 def pushkey(self, namespace, key, old, new):
146 146 return self._repo.pushkey(namespace, key, old, new)
147 147
148 148 def listkeys(self, namespace):
149 149 return self._repo.listkeys(namespace)
150 150
151 151 def debugwireargs(self, one, two, three=None, four=None, five=None):
152 152 '''used to test argument passing over the wire'''
153 153 return "%s %s %s %s %s" % (one, two, three, four, five)
154 154
155 155 class locallegacypeer(localpeer):
156 156 '''peer extension which implements legacy methods too; used for tests with
157 157 restricted capabilities'''
158 158
159 159 def __init__(self, repo):
160 160 localpeer.__init__(self, repo, caps=legacycaps)
161 161
162 162 def branches(self, nodes):
163 163 return self._repo.branches(nodes)
164 164
165 165 def between(self, pairs):
166 166 return self._repo.between(pairs)
167 167
168 168 def changegroup(self, basenodes, source):
169 169 return changegroup.changegroup(self._repo, basenodes, source)
170 170
171 171 def changegroupsubset(self, bases, heads, source):
172 172 return changegroup.changegroupsubset(self._repo, bases, heads, source)
173 173
174 174 class localrepository(object):
175 175
176 176 supportedformats = set(('revlogv1', 'generaldelta'))
177 177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
178 178 'dotencode'))
179 179 openerreqs = set(('revlogv1', 'generaldelta'))
180 180 requirements = ['revlogv1']
181 181 filtername = None
182 182
183 183 bundle2caps = {'HG2X': (),
184 184 'b2x:listkeys': (),
185 185 'b2x:pushkey': (),
186 186 'b2x:changegroup': (),
187 187 }
188 188
189 189 # a list of (ui, featureset) functions.
190 190 # only functions defined in module of enabled extensions are invoked
191 191 featuresetupfuncs = set()
192 192
193 193 def _baserequirements(self, create):
194 194 return self.requirements[:]
195 195
196 196 def __init__(self, baseui, path=None, create=False):
197 197 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
198 198 self.wopener = self.wvfs
199 199 self.root = self.wvfs.base
200 200 self.path = self.wvfs.join(".hg")
201 201 self.origroot = path
202 202 self.auditor = pathutil.pathauditor(self.root, self._checknested)
203 203 self.vfs = scmutil.vfs(self.path)
204 204 self.opener = self.vfs
205 205 self.baseui = baseui
206 206 self.ui = baseui.copy()
207 207 self.ui.copy = baseui.copy # prevent copying repo configuration
208 208 # A list of callbacks to shape the phase if no data were found.
209 209 # Callbacks are in the form: func(repo, roots) --> processed root.
210 210 # This list is to be filled by extensions during repo setup.
211 211 self._phasedefaults = []
212 212 try:
213 213 self.ui.readconfig(self.join("hgrc"), self.root)
214 214 extensions.loadall(self.ui)
215 215 except IOError:
216 216 pass
217 217
218 218 if self.featuresetupfuncs:
219 219 self.supported = set(self._basesupported) # use private copy
220 220 extmods = set(m.__name__ for n, m
221 221 in extensions.extensions(self.ui))
222 222 for setupfunc in self.featuresetupfuncs:
223 223 if setupfunc.__module__ in extmods:
224 224 setupfunc(self.ui, self.supported)
225 225 else:
226 226 self.supported = self._basesupported
227 227
228 228 if not self.vfs.isdir():
229 229 if create:
230 230 if not self.wvfs.exists():
231 231 self.wvfs.makedirs()
232 232 self.vfs.makedir(notindexed=True)
233 233 requirements = self._baserequirements(create)
234 234 if self.ui.configbool('format', 'usestore', True):
235 235 self.vfs.mkdir("store")
236 236 requirements.append("store")
237 237 if self.ui.configbool('format', 'usefncache', True):
238 238 requirements.append("fncache")
239 239 if self.ui.configbool('format', 'dotencode', True):
240 240 requirements.append('dotencode')
241 241 # create an invalid changelog
242 242 self.vfs.append(
243 243 "00changelog.i",
244 244 '\0\0\0\2' # represents revlogv2
245 245 ' dummy changelog to prevent using the old repo layout'
246 246 )
247 247 if self.ui.configbool('format', 'generaldelta', False):
248 248 requirements.append("generaldelta")
249 249 requirements = set(requirements)
250 250 else:
251 251 raise error.RepoError(_("repository %s not found") % path)
252 252 elif create:
253 253 raise error.RepoError(_("repository %s already exists") % path)
254 254 else:
255 255 try:
256 256 requirements = scmutil.readrequires(self.vfs, self.supported)
257 257 except IOError, inst:
258 258 if inst.errno != errno.ENOENT:
259 259 raise
260 260 requirements = set()
261 261
262 262 self.sharedpath = self.path
263 263 try:
264 264 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
265 265 realpath=True)
266 266 s = vfs.base
267 267 if not vfs.exists():
268 268 raise error.RepoError(
269 269 _('.hg/sharedpath points to nonexistent directory %s') % s)
270 270 self.sharedpath = s
271 271 except IOError, inst:
272 272 if inst.errno != errno.ENOENT:
273 273 raise
274 274
275 275 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
276 276 self.spath = self.store.path
277 277 self.svfs = self.store.vfs
278 278 self.sopener = self.svfs
279 279 self.sjoin = self.store.join
280 280 self.vfs.createmode = self.store.createmode
281 281 self._applyrequirements(requirements)
282 282 if create:
283 283 self._writerequirements()
284 284
285 285
286 286 self._branchcaches = {}
287 287 self.filterpats = {}
288 288 self._datafilters = {}
289 289 self._transref = self._lockref = self._wlockref = None
290 290
291 291 # A cache for various files under .hg/ that tracks file changes,
292 292 # (used by the filecache decorator)
293 293 #
294 294 # Maps a property name to its util.filecacheentry
295 295 self._filecache = {}
296 296
297 297 # hold sets of revision to be filtered
298 298 # should be cleared when something might have changed the filter value:
299 299 # - new changesets,
300 300 # - phase change,
301 301 # - new obsolescence marker,
302 302 # - working directory parent change,
303 303 # - bookmark changes
304 304 self.filteredrevcache = {}
305 305
306 306 def close(self):
307 307 pass
308 308
309 309 def _restrictcapabilities(self, caps):
310 310 # bundle2 is not ready for prime time, drop it unless explicitly
311 311 # required by the tests (or some brave tester)
312 312 if self.ui.configbool('experimental', 'bundle2-exp', False):
313 313 caps = set(caps)
314 314 capsblob = bundle2.encodecaps(self.bundle2caps)
315 315 caps.add('bundle2-exp=' + urllib.quote(capsblob))
316 316 return caps
317 317
318 318 def _applyrequirements(self, requirements):
319 319 self.requirements = requirements
320 320 self.sopener.options = dict((r, 1) for r in requirements
321 321 if r in self.openerreqs)
322 322 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
323 323 if chunkcachesize is not None:
324 324 self.sopener.options['chunkcachesize'] = chunkcachesize
325 325
326 326 def _writerequirements(self):
327 327 reqfile = self.opener("requires", "w")
328 328 for r in sorted(self.requirements):
329 329 reqfile.write("%s\n" % r)
330 330 reqfile.close()
331 331
332 332 def _checknested(self, path):
333 333 """Determine if path is a legal nested repository."""
334 334 if not path.startswith(self.root):
335 335 return False
336 336 subpath = path[len(self.root) + 1:]
337 337 normsubpath = util.pconvert(subpath)
338 338
339 339 # XXX: Checking against the current working copy is wrong in
340 340 # the sense that it can reject things like
341 341 #
342 342 # $ hg cat -r 10 sub/x.txt
343 343 #
344 344 # if sub/ is no longer a subrepository in the working copy
345 345 # parent revision.
346 346 #
347 347 # However, it can of course also allow things that would have
348 348 # been rejected before, such as the above cat command if sub/
349 349 # is a subrepository now, but was a normal directory before.
350 350 # The old path auditor would have rejected by mistake since it
351 351 # panics when it sees sub/.hg/.
352 352 #
353 353 # All in all, checking against the working copy seems sensible
354 354 # since we want to prevent access to nested repositories on
355 355 # the filesystem *now*.
356 356 ctx = self[None]
357 357 parts = util.splitpath(subpath)
358 358 while parts:
359 359 prefix = '/'.join(parts)
360 360 if prefix in ctx.substate:
361 361 if prefix == normsubpath:
362 362 return True
363 363 else:
364 364 sub = ctx.sub(prefix)
365 365 return sub.checknested(subpath[len(prefix) + 1:])
366 366 else:
367 367 parts.pop()
368 368 return False
369 369
370 370 def peer(self):
371 371 return localpeer(self) # not cached to avoid reference cycle
372 372
373 373 def unfiltered(self):
374 374 """Return unfiltered version of the repository
375 375
376 376 Intended to be overwritten by filtered repo."""
377 377 return self
378 378
379 379 def filtered(self, name):
380 380 """Return a filtered version of a repository"""
381 381 # build a new class with the mixin and the current class
382 382 # (possibly subclass of the repo)
383 383 class proxycls(repoview.repoview, self.unfiltered().__class__):
384 384 pass
385 385 return proxycls(self, name)
386 386
387 387 @repofilecache('bookmarks')
388 388 def _bookmarks(self):
389 389 return bookmarks.bmstore(self)
390 390
391 391 @repofilecache('bookmarks.current')
392 392 def _bookmarkcurrent(self):
393 393 return bookmarks.readcurrent(self)
394 394
395 395 def bookmarkheads(self, bookmark):
396 396 name = bookmark.split('@', 1)[0]
397 397 heads = []
398 398 for mark, n in self._bookmarks.iteritems():
399 399 if mark.split('@', 1)[0] == name:
400 400 heads.append(n)
401 401 return heads
402 402
403 403 @storecache('phaseroots')
404 404 def _phasecache(self):
405 405 return phases.phasecache(self, self._phasedefaults)
406 406
407 407 @storecache('obsstore')
408 408 def obsstore(self):
409 409 store = obsolete.obsstore(self.sopener)
410 410 if store and not obsolete._enabled:
411 411 # message is rare enough to not be translated
412 412 msg = 'obsolete feature not enabled but %i markers found!\n'
413 413 self.ui.warn(msg % len(list(store)))
414 414 return store
415 415
416 416 @storecache('00changelog.i')
417 417 def changelog(self):
418 418 c = changelog.changelog(self.sopener)
419 419 if 'HG_PENDING' in os.environ:
420 420 p = os.environ['HG_PENDING']
421 421 if p.startswith(self.root):
422 422 c.readpending('00changelog.i.a')
423 423 return c
424 424
425 425 @storecache('00manifest.i')
426 426 def manifest(self):
427 427 return manifest.manifest(self.sopener)
428 428
429 429 @repofilecache('dirstate')
430 430 def dirstate(self):
431 431 warned = [0]
432 432 def validate(node):
433 433 try:
434 434 self.changelog.rev(node)
435 435 return node
436 436 except error.LookupError:
437 437 if not warned[0]:
438 438 warned[0] = True
439 439 self.ui.warn(_("warning: ignoring unknown"
440 440 " working parent %s!\n") % short(node))
441 441 return nullid
442 442
443 443 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
444 444
445 445 def __getitem__(self, changeid):
446 446 if changeid is None:
447 447 return context.workingctx(self)
448 448 return context.changectx(self, changeid)
449 449
450 450 def __contains__(self, changeid):
451 451 try:
452 452 return bool(self.lookup(changeid))
453 453 except error.RepoLookupError:
454 454 return False
455 455
456 456 def __nonzero__(self):
457 457 return True
458 458
459 459 def __len__(self):
460 460 return len(self.changelog)
461 461
462 462 def __iter__(self):
463 463 return iter(self.changelog)
464 464
465 465 def revs(self, expr, *args):
466 466 '''Return a list of revisions matching the given revset'''
467 467 expr = revset.formatspec(expr, *args)
468 468 m = revset.match(None, expr)
469 469 return m(self, revset.spanset(self))
470 470
471 471 def set(self, expr, *args):
472 472 '''
473 473 Yield a context for each matching revision, after doing arg
474 474 replacement via revset.formatspec
475 475 '''
476 476 for r in self.revs(expr, *args):
477 477 yield self[r]
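
revs() returns plain revision numbers while set() yields full change contexts; both run the expression through revset.formatspec first, so arguments are escaped safely. Hypothetical usage, assuming an open localrepository:

    for rev in repo.revs('branch(%s) and not public()', 'default'):
        print rev
    for ctx in repo.set('heads(all())'):
        print ctx.hex(), ctx.description().split('\n')[0]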
478 478
479 479 def url(self):
480 480 return 'file:' + self.root
481 481
482 482 def hook(self, name, throw=False, **args):
483 483 """Call a hook, passing this repo instance.
484 484
485 485 This is a convenience method to aid invoking hooks. Extensions likely
486 486 won't call this unless they have registered a custom hook or are
487 487 replacing code that is expected to call a hook.
488 488 """
489 489 return hook.hook(self.ui, self, name, throw, **args)
490 490
491 491 @unfilteredmethod
492 492 def _tag(self, names, node, message, local, user, date, extra={},
493 493 editor=False):
494 494 if isinstance(names, str):
495 495 names = (names,)
496 496
497 497 branches = self.branchmap()
498 498 for name in names:
499 499 self.hook('pretag', throw=True, node=hex(node), tag=name,
500 500 local=local)
501 501 if name in branches:
502 502 self.ui.warn(_("warning: tag %s conflicts with existing"
503 503 " branch name\n") % name)
504 504
505 505 def writetags(fp, names, munge, prevtags):
506 506 fp.seek(0, 2)
507 507 if prevtags and prevtags[-1] != '\n':
508 508 fp.write('\n')
509 509 for name in names:
510 510 m = munge and munge(name) or name
511 511 if (self._tagscache.tagtypes and
512 512 name in self._tagscache.tagtypes):
513 513 old = self.tags().get(name, nullid)
514 514 fp.write('%s %s\n' % (hex(old), m))
515 515 fp.write('%s %s\n' % (hex(node), m))
516 516 fp.close()
517 517
518 518 prevtags = ''
519 519 if local:
520 520 try:
521 521 fp = self.opener('localtags', 'r+')
522 522 except IOError:
523 523 fp = self.opener('localtags', 'a')
524 524 else:
525 525 prevtags = fp.read()
526 526
527 527 # local tags are stored in the current charset
528 528 writetags(fp, names, None, prevtags)
529 529 for name in names:
530 530 self.hook('tag', node=hex(node), tag=name, local=local)
531 531 return
532 532
533 533 try:
534 534 fp = self.wfile('.hgtags', 'rb+')
535 535 except IOError, e:
536 536 if e.errno != errno.ENOENT:
537 537 raise
538 538 fp = self.wfile('.hgtags', 'ab')
539 539 else:
540 540 prevtags = fp.read()
541 541
542 542 # committed tags are stored in UTF-8
543 543 writetags(fp, names, encoding.fromlocal, prevtags)
544 544
545 545 fp.close()
546 546
547 547 self.invalidatecaches()
548 548
549 549 if '.hgtags' not in self.dirstate:
550 550 self[None].add(['.hgtags'])
551 551
552 552 m = matchmod.exact(self.root, '', ['.hgtags'])
553 553 tagnode = self.commit(message, user, date, extra=extra, match=m,
554 554 editor=editor)
555 555
556 556 for name in names:
557 557 self.hook('tag', node=hex(node), tag=name, local=local)
558 558
559 559 return tagnode
560 560
561 561 def tag(self, names, node, message, local, user, date, editor=False):
562 562 '''tag a revision with one or more symbolic names.
563 563
564 564 names is a list of strings or, when adding a single tag, names may be a
565 565 string.
566 566
567 567 if local is True, the tags are stored in a per-repository file.
568 568 otherwise, they are stored in the .hgtags file, and a new
569 569 changeset is committed with the change.
570 570
571 571 keyword arguments:
572 572
573 573 local: whether to store tags in non-version-controlled file
574 574 (default False)
575 575
576 576 message: commit message to use if committing
577 577
578 578 user: name of user to use if committing
579 579
580 580 date: date tuple to use if committing'''
581 581
582 582 if not local:
583 583 for x in self.status()[:5]:
584 584 if '.hgtags' in x:
585 585 raise util.Abort(_('working copy of .hgtags is changed '
586 586 '(please commit .hgtags manually)'))
587 587
588 588 self.tags() # instantiate the cache
589 589 self._tag(names, node, message, local, user, date, editor=editor)
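
A usage sketch (values are illustrative): tagging the current tip globally goes through _tag() above and commits a .hgtags change:

    node = repo['tip'].node()
    repo.tag('v1.0', node, 'Added tag v1.0', local=False,
             user='editor <editor@example.com>', date=None)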
590 590
591 591 @filteredpropertycache
592 592 def _tagscache(self):
593 593 '''Returns a tagscache object that contains various tag-related
594 594 caches.'''
595 595
596 596 # This simplifies its cache management by having one decorated
597 597 # function (this one) and the rest simply fetch things from it.
598 598 class tagscache(object):
599 599 def __init__(self):
600 600 # These two define the set of tags for this repository. tags
601 601 # maps tag name to node; tagtypes maps tag name to 'global' or
602 602 # 'local'. (Global tags are defined by .hgtags across all
603 603 # heads, and local tags are defined in .hg/localtags.)
604 604 # They constitute the in-memory cache of tags.
605 605 self.tags = self.tagtypes = None
606 606
607 607 self.nodetagscache = self.tagslist = None
608 608
609 609 cache = tagscache()
610 610 cache.tags, cache.tagtypes = self._findtags()
611 611
612 612 return cache
613 613
614 614 def tags(self):
615 615 '''return a mapping of tag to node'''
616 616 t = {}
617 617 if self.changelog.filteredrevs:
618 618 tags, tt = self._findtags()
619 619 else:
620 620 tags = self._tagscache.tags
621 621 for k, v in tags.iteritems():
622 622 try:
623 623 # ignore tags to unknown nodes
624 624 self.changelog.rev(v)
625 625 t[k] = v
626 626 except (error.LookupError, ValueError):
627 627 pass
628 628 return t
629 629
630 630 def _findtags(self):
631 631 '''Do the hard work of finding tags. Return a pair of dicts
632 632 (tags, tagtypes) where tags maps tag name to node, and tagtypes
633 633 maps tag name to a string like \'global\' or \'local\'.
634 634 Subclasses or extensions are free to add their own tags, but
635 635 should be aware that the returned dicts will be retained for the
636 636 duration of the localrepo object.'''
637 637
638 638 # XXX what tagtype should subclasses/extensions use? Currently
639 639 # mq and bookmarks add tags, but do not set the tagtype at all.
640 640 # Should each extension invent its own tag type? Should there
641 641 # be one tagtype for all such "virtual" tags? Or is the status
642 642 # quo fine?
643 643
644 644 alltags = {} # map tag name to (node, hist)
645 645 tagtypes = {}
646 646
647 647 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
648 648 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
649 649
650 650 # Build the return dicts. Have to re-encode tag names because
651 651 # the tags module always uses UTF-8 (in order not to lose info
652 652 # writing to the cache), but the rest of Mercurial wants them in
653 653 # local encoding.
654 654 tags = {}
655 655 for (name, (node, hist)) in alltags.iteritems():
656 656 if node != nullid:
657 657 tags[encoding.tolocal(name)] = node
658 658 tags['tip'] = self.changelog.tip()
659 659 tagtypes = dict([(encoding.tolocal(name), value)
660 660 for (name, value) in tagtypes.iteritems()])
661 661 return (tags, tagtypes)
662 662
663 663 def tagtype(self, tagname):
664 664 '''
665 665 return the type of the given tag. result can be:
666 666
667 667 'local' : a local tag
668 668 'global' : a global tag
669 669 None : tag does not exist
670 670 '''
671 671
672 672 return self._tagscache.tagtypes.get(tagname)
673 673
674 674 def tagslist(self):
675 675 '''return a list of tags ordered by revision'''
676 676 if not self._tagscache.tagslist:
677 677 l = []
678 678 for t, n in self.tags().iteritems():
679 679 r = self.changelog.rev(n)
680 680 l.append((r, t, n))
681 681 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
682 682
683 683 return self._tagscache.tagslist
684 684
685 685 def nodetags(self, node):
686 686 '''return the tags associated with a node'''
687 687 if not self._tagscache.nodetagscache:
688 688 nodetagscache = {}
689 689 for t, n in self._tagscache.tags.iteritems():
690 690 nodetagscache.setdefault(n, []).append(t)
691 691 for tags in nodetagscache.itervalues():
692 692 tags.sort()
693 693 self._tagscache.nodetagscache = nodetagscache
694 694 return self._tagscache.nodetagscache.get(node, [])
695 695
696 696 def nodebookmarks(self, node):
697 697 marks = []
698 698 for bookmark, n in self._bookmarks.iteritems():
699 699 if n == node:
700 700 marks.append(bookmark)
701 701 return sorted(marks)
702 702
703 703 def branchmap(self):
704 704 '''returns a dictionary {branch: [branchheads]} with branchheads
705 705 ordered by increasing revision number'''
706 706 branchmap.updatecache(self)
707 707 return self._branchcaches[self.filtername]
708 708
709 709 def branchtip(self, branch):
710 710 '''return the tip node for a given branch'''
711 711 try:
712 712 return self.branchmap().branchtip(branch)
713 713 except KeyError:
714 714 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
715 715
716 716 def lookup(self, key):
717 717 return self[key].node()
718 718
719 719 def lookupbranch(self, key, remote=None):
720 720 repo = remote or self
721 721 if key in repo.branchmap():
722 722 return key
723 723
724 724 repo = (remote and remote.local()) and remote or self
725 725 return repo[key].branch()
726 726
727 727 def known(self, nodes):
728 728 nm = self.changelog.nodemap
729 729 pc = self._phasecache
730 730 result = []
731 731 for n in nodes:
732 732 r = nm.get(n)
733 733 resp = not (r is None or pc.phase(self, r) >= phases.secret)
734 734 result.append(resp)
735 735 return result
736 736
737 737 def local(self):
738 738 return self
739 739
740 740 def cancopy(self):
741 741 # so statichttprepo's override of local() works
742 742 if not self.local():
743 743 return False
744 744 if not self.ui.configbool('phases', 'publish', True):
745 745 return True
746 746 # if publishing we can't copy if there is filtered content
747 747 return not self.filtered('visible').changelog.filteredrevs
748 748
749 749 def join(self, f):
750 750 return os.path.join(self.path, f)
751 751
752 752 def wjoin(self, f):
753 753 return os.path.join(self.root, f)
754 754
755 755 def file(self, f):
756 756 if f[0] == '/':
757 757 f = f[1:]
758 758 return filelog.filelog(self.sopener, f)
759 759
760 760 def changectx(self, changeid):
761 761 return self[changeid]
762 762
763 763 def parents(self, changeid=None):
764 764 '''get list of changectxs for parents of changeid'''
765 765 return self[changeid].parents()
766 766
767 767 def setparents(self, p1, p2=nullid):
768 768 copies = self.dirstate.setparents(p1, p2)
769 769 pctx = self[p1]
770 770 if copies:
771 771 # Adjust copy records; the dirstate cannot do it, as it
772 772 # requires access to the parents' manifests. Preserve them
773 773 # only for entries added to the first parent.
774 774 for f in copies:
775 775 if f not in pctx and copies[f] in pctx:
776 776 self.dirstate.copy(copies[f], f)
777 777 if p2 == nullid:
778 778 for f, s in sorted(self.dirstate.copies().items()):
779 779 if f not in pctx and s not in pctx:
780 780 self.dirstate.copy(None, f)
781 781
782 782 def filectx(self, path, changeid=None, fileid=None):
783 783 """changeid can be a changeset revision, node, or tag.
784 784 fileid can be a file revision or node."""
785 785 return context.filectx(self, path, changeid, fileid)
786 786
787 787 def getcwd(self):
788 788 return self.dirstate.getcwd()
789 789
790 790 def pathto(self, f, cwd=None):
791 791 return self.dirstate.pathto(f, cwd)
792 792
793 793 def wfile(self, f, mode='r'):
794 794 return self.wopener(f, mode)
795 795
796 796 def _link(self, f):
797 797 return self.wvfs.islink(f)
798 798
799 799 def _loadfilter(self, filter):
800 800 if filter not in self.filterpats:
801 801 l = []
802 802 for pat, cmd in self.ui.configitems(filter):
803 803 if cmd == '!':
804 804 continue
805 805 mf = matchmod.match(self.root, '', [pat])
806 806 fn = None
807 807 params = cmd
808 808 for name, filterfn in self._datafilters.iteritems():
809 809 if cmd.startswith(name):
810 810 fn = filterfn
811 811 params = cmd[len(name):].lstrip()
812 812 break
813 813 if not fn:
814 814 fn = lambda s, c, **kwargs: util.filter(s, c)
815 815 # Wrap old filters not supporting keyword arguments
816 816 if not inspect.getargspec(fn)[2]:
817 817 oldfn = fn
818 818 fn = lambda s, c, **kwargs: oldfn(s, c)
819 819 l.append((mf, fn, params))
820 820 self.filterpats[filter] = l
821 821 return self.filterpats[filter]
822 822
823 823 def _filter(self, filterpats, filename, data):
824 824 for mf, fn, cmd in filterpats:
825 825 if mf(filename):
826 826 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
827 827 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
828 828 break
829 829
830 830 return data
831 831
832 832 @unfilteredpropertycache
833 833 def _encodefilterpats(self):
834 834 return self._loadfilter('encode')
835 835
836 836 @unfilteredpropertycache
837 837 def _decodefilterpats(self):
838 838 return self._loadfilter('decode')
839 839
840 840 def adddatafilter(self, name, filter):
841 841 self._datafilters[name] = filter
842 842
843 843 def wread(self, filename):
844 844 if self._link(filename):
845 845 data = self.wvfs.readlink(filename)
846 846 else:
847 847 data = self.wopener.read(filename)
848 848 return self._filter(self._encodefilterpats, filename, data)
849 849
850 850 def wwrite(self, filename, data, flags):
851 851 data = self._filter(self._decodefilterpats, filename, data)
852 852 if 'l' in flags:
853 853 self.wopener.symlink(data, filename)
854 854 else:
855 855 self.wopener.write(filename, data)
856 856 if 'x' in flags:
857 857 self.wvfs.setflags(filename, False, True)
858 858
859 859 def wwritedata(self, filename, data):
860 860 return self._filter(self._decodefilterpats, filename, data)
861 861
862 862 def transaction(self, desc, report=None):
863 863 tr = self._transref and self._transref() or None
864 864 if tr and tr.running():
865 865 return tr.nest()
866 866
867 867 # abort here if the journal already exists
868 868 if self.svfs.exists("journal"):
869 869 raise error.RepoError(
870 870 _("abandoned transaction found"),
871 871 hint=_("run 'hg recover' to clean up transaction"))
872 872
873 873 def onclose():
874 874 self.store.write(self._transref())
875 875
876 876 self._writejournal(desc)
877 877 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
878 878 rp = report or self.ui.warn
879 879 tr = transaction.transaction(rp, self.sopener,
880 880 "journal",
881 881 aftertrans(renames),
882 882 self.store.createmode,
883 883 onclose)
884 884 self._transref = weakref.ref(tr)
885 885 return tr
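A typical consumer of this method follows the close-or-release discipline used throughout this file; a minimal sketch, with an illustrative description string:

    # Sketch of the usual discipline around repo.transaction(): close()
    # commits the journalled writes; release() in the finally block is a
    # no-op after close() and aborts/rolls back anything left open.
    tr = repo.transaction('example')  # hypothetical description
    try:
        # ... write store data through tr ...
        tr.close()
    finally:
        tr.release()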
886 886
887 887 def _journalfiles(self):
888 888 return ((self.svfs, 'journal'),
889 889 (self.vfs, 'journal.dirstate'),
890 890 (self.vfs, 'journal.branch'),
891 891 (self.vfs, 'journal.desc'),
892 892 (self.vfs, 'journal.bookmarks'),
893 893 (self.svfs, 'journal.phaseroots'))
894 894
895 895 def undofiles(self):
896 896 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
897 897
898 898 def _writejournal(self, desc):
899 899 self.opener.write("journal.dirstate",
900 900 self.opener.tryread("dirstate"))
901 901 self.opener.write("journal.branch",
902 902 encoding.fromlocal(self.dirstate.branch()))
903 903 self.opener.write("journal.desc",
904 904 "%d\n%s\n" % (len(self), desc))
905 905 self.opener.write("journal.bookmarks",
906 906 self.opener.tryread("bookmarks"))
907 907 self.sopener.write("journal.phaseroots",
908 908 self.sopener.tryread("phaseroots"))
909 909
910 910 def recover(self):
911 911 lock = self.lock()
912 912 try:
913 913 if self.svfs.exists("journal"):
914 914 self.ui.status(_("rolling back interrupted transaction\n"))
915 915 transaction.rollback(self.sopener, "journal",
916 916 self.ui.warn)
917 917 self.invalidate()
918 918 return True
919 919 else:
920 920 self.ui.warn(_("no interrupted transaction available\n"))
921 921 return False
922 922 finally:
923 923 lock.release()
924 924
925 925 def rollback(self, dryrun=False, force=False):
926 926 wlock = lock = None
927 927 try:
928 928 wlock = self.wlock()
929 929 lock = self.lock()
930 930 if self.svfs.exists("undo"):
931 931 return self._rollback(dryrun, force)
932 932 else:
933 933 self.ui.warn(_("no rollback information available\n"))
934 934 return 1
935 935 finally:
936 936 release(lock, wlock)
937 937
938 938 @unfilteredmethod # Until we get smarter cache management
939 939 def _rollback(self, dryrun, force):
940 940 ui = self.ui
941 941 try:
942 942 args = self.opener.read('undo.desc').splitlines()
943 943 (oldlen, desc, detail) = (int(args[0]), args[1], None)
944 944 if len(args) >= 3:
945 945 detail = args[2]
946 946 oldtip = oldlen - 1
947 947
948 948 if detail and ui.verbose:
949 949 msg = (_('repository tip rolled back to revision %s'
950 950 ' (undo %s: %s)\n')
951 951 % (oldtip, desc, detail))
952 952 else:
953 953 msg = (_('repository tip rolled back to revision %s'
954 954 ' (undo %s)\n')
955 955 % (oldtip, desc))
956 956 except IOError:
957 957 msg = _('rolling back unknown transaction\n')
958 958 desc = None
959 959
960 960 if not force and self['.'] != self['tip'] and desc == 'commit':
961 961 raise util.Abort(
962 962 _('rollback of last commit while not checked out '
963 963 'may lose data'), hint=_('use -f to force'))
964 964
965 965 ui.status(msg)
966 966 if dryrun:
967 967 return 0
968 968
969 969 parents = self.dirstate.parents()
970 970 self.destroying()
971 971 transaction.rollback(self.sopener, 'undo', ui.warn)
972 972 if self.vfs.exists('undo.bookmarks'):
973 973 self.vfs.rename('undo.bookmarks', 'bookmarks')
974 974 if self.svfs.exists('undo.phaseroots'):
975 975 self.svfs.rename('undo.phaseroots', 'phaseroots')
976 976 self.invalidate()
977 977
978 978 parentgone = (parents[0] not in self.changelog.nodemap or
979 979 parents[1] not in self.changelog.nodemap)
980 980 if parentgone:
981 981 self.vfs.rename('undo.dirstate', 'dirstate')
982 982 try:
983 983 branch = self.opener.read('undo.branch')
984 984 self.dirstate.setbranch(encoding.tolocal(branch))
985 985 except IOError:
986 986 ui.warn(_('named branch could not be reset: '
987 987 'current branch is still \'%s\'\n')
988 988 % self.dirstate.branch())
989 989
990 990 self.dirstate.invalidate()
991 991 parents = tuple([p.rev() for p in self.parents()])
992 992 if len(parents) > 1:
993 993 ui.status(_('working directory now based on '
994 994 'revisions %d and %d\n') % parents)
995 995 else:
996 996 ui.status(_('working directory now based on '
997 997 'revision %d\n') % parents)
998 998 # TODO: if we know which new heads may result from this rollback, pass
999 999 # them to destroy(), which will prevent the branchhead cache from being
1000 1000 # invalidated.
1001 1001 self.destroyed()
1002 1002 return 0
1003 1003
1004 1004 def invalidatecaches(self):
1005 1005
1006 1006 if '_tagscache' in vars(self):
1007 1007 # can't use delattr on proxy
1008 1008 del self.__dict__['_tagscache']
1009 1009
1010 1010 self.unfiltered()._branchcaches.clear()
1011 1011 self.invalidatevolatilesets()
1012 1012
1013 1013 def invalidatevolatilesets(self):
1014 1014 self.filteredrevcache.clear()
1015 1015 obsolete.clearobscaches(self)
1016 1016
1017 1017 def invalidatedirstate(self):
1018 1018 '''Invalidates the dirstate, causing the next call to dirstate
1019 1019 to check if it was modified since the last time it was read,
1020 1020 rereading it if it has.
1021 1021
1022 1022 This differs from dirstate.invalidate() in that it doesn't always
1023 1023 reread the dirstate. Use dirstate.invalidate() if you want to
1024 1024 explicitly read the dirstate again (i.e. restore it to a previous
1025 1025 known good state).'''
1026 1026 if hasunfilteredcache(self, 'dirstate'):
1027 1027 for k in self.dirstate._filecache:
1028 1028 try:
1029 1029 delattr(self.dirstate, k)
1030 1030 except AttributeError:
1031 1031 pass
1032 1032 delattr(self.unfiltered(), 'dirstate')
1033 1033
1034 1034 def invalidate(self):
1035 1035 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1036 1036 for k in self._filecache:
1037 1037 # dirstate is invalidated separately in invalidatedirstate()
1038 1038 if k == 'dirstate':
1039 1039 continue
1040 1040
1041 1041 try:
1042 1042 delattr(unfiltered, k)
1043 1043 except AttributeError:
1044 1044 pass
1045 1045 self.invalidatecaches()
1046 1046 self.store.invalidatecaches()
1047 1047
1048 1048 def invalidateall(self):
1049 1049 '''Fully invalidates both store and non-store parts, causing the
1050 1050 subsequent operation to reread any outside changes.'''
1051 1051 # extension should hook this to invalidate its caches
1052 1052 self.invalidate()
1053 1053 self.invalidatedirstate()
1054 1054
1055 1055 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1056 1056 try:
1057 1057 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1058 1058 except error.LockHeld, inst:
1059 1059 if not wait:
1060 1060 raise
1061 1061 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1062 1062 (desc, inst.locker))
1063 1063 # default to 600 seconds timeout
1064 1064 l = lockmod.lock(vfs, lockname,
1065 1065 int(self.ui.config("ui", "timeout", "600")),
1066 1066 releasefn, desc=desc)
1067 1067 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1068 1068 if acquirefn:
1069 1069 acquirefn()
1070 1070 return l
1071 1071
1072 1072 def _afterlock(self, callback):
1073 1073 """add a callback to the current repository lock.
1074 1074
1075 1075 The callback will be executed on lock release."""
1076 1076 l = self._lockref and self._lockref()
1077 1077 if l:
1078 1078 l.postrelease.append(callback)
1079 1079 else:
1080 1080 callback()
1081 1081
1082 1082 def lock(self, wait=True):
1083 1083 '''Lock the repository store (.hg/store) and return a weak reference
1084 1084 to the lock. Use this before modifying the store (e.g. committing or
1085 1085 stripping). If you are opening a transaction, get a lock as well.'''
1086 1086 l = self._lockref and self._lockref()
1087 1087 if l is not None and l.held:
1088 1088 l.lock()
1089 1089 return l
1090 1090
1091 1091 def unlock():
1092 1092 if hasunfilteredcache(self, '_phasecache'):
1093 1093 self._phasecache.write()
1094 1094 for k, ce in self._filecache.items():
1095 1095 if k == 'dirstate' or k not in self.__dict__:
1096 1096 continue
1097 1097 ce.refresh()
1098 1098
1099 1099 l = self._lock(self.svfs, "lock", wait, unlock,
1100 1100 self.invalidate, _('repository %s') % self.origroot)
1101 1101 self._lockref = weakref.ref(l)
1102 1102 return l
1103 1103
1104 1104 def wlock(self, wait=True):
1105 1105 '''Lock the non-store parts of the repository (everything under
1106 1106 .hg except .hg/store) and return a weak reference to the lock.
1107 1107 Use this before modifying files in .hg.'''
1108 1108 l = self._wlockref and self._wlockref()
1109 1109 if l is not None and l.held:
1110 1110 l.lock()
1111 1111 return l
1112 1112
1113 1113 def unlock():
1114 1114 self.dirstate.write()
1115 1115 self._filecache['dirstate'].refresh()
1116 1116
1117 1117 l = self._lock(self.vfs, "wlock", wait, unlock,
1118 1118 self.invalidatedirstate, _('working directory of %s') %
1119 1119 self.origroot)
1120 1120 self._wlockref = weakref.ref(l)
1121 1121 return l
1122 1122
1123 1123 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1124 1124 """
1125 1125 commit an individual file as part of a larger transaction
1126 1126 """
1127 1127
1128 1128 fname = fctx.path()
1129 1129 text = fctx.data()
1130 1130 flog = self.file(fname)
1131 1131 fparent1 = manifest1.get(fname, nullid)
1132 1132 fparent2 = fparent2o = manifest2.get(fname, nullid)
1133 1133
1134 1134 meta = {}
1135 1135 copy = fctx.renamed()
1136 1136 if copy and copy[0] != fname:
1137 1137 # Mark the new revision of this file as a copy of another
1138 1138 # file. This copy data will effectively act as a parent
1139 1139 # of this new revision. If this is a merge, the first
1140 1140 # parent will be the nullid (meaning "look up the copy data")
1141 1141 # and the second one will be the other parent. For example:
1142 1142 #
1143 1143 # 0 --- 1 --- 3 rev1 changes file foo
1144 1144 # \ / rev2 renames foo to bar and changes it
1145 1145 # \- 2 -/ rev3 should have bar with all changes and
1146 1146 # should record that bar descends from
1147 1147 # bar in rev2 and foo in rev1
1148 1148 #
1149 1149 # this allows this merge to succeed:
1150 1150 #
1151 1151 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1152 1152 # \ / merging rev3 and rev4 should use bar@rev2
1153 1153 # \- 2 --- 4 as the merge base
1154 1154 #
1155 1155
1156 1156 cfname = copy[0]
1157 1157 crev = manifest1.get(cfname)
1158 1158 newfparent = fparent2
1159 1159
1160 1160 if manifest2: # branch merge
1161 1161 if fparent2 == nullid or crev is None: # copied on remote side
1162 1162 if cfname in manifest2:
1163 1163 crev = manifest2[cfname]
1164 1164 newfparent = fparent1
1165 1165
1166 1166 # find source in nearest ancestor if we've lost track
1167 1167 if not crev:
1168 1168 self.ui.debug(" %s: searching for copy revision for %s\n" %
1169 1169 (fname, cfname))
1170 1170 for ancestor in self[None].ancestors():
1171 1171 if cfname in ancestor:
1172 1172 crev = ancestor[cfname].filenode()
1173 1173 break
1174 1174
1175 1175 if crev:
1176 1176 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1177 1177 meta["copy"] = cfname
1178 1178 meta["copyrev"] = hex(crev)
1179 1179 fparent1, fparent2 = nullid, newfparent
1180 1180 else:
1181 1181 self.ui.warn(_("warning: can't find ancestor for '%s' "
1182 1182 "copied from '%s'!\n") % (fname, cfname))
1183 1183
1184 1184 elif fparent1 == nullid:
1185 1185 fparent1, fparent2 = fparent2, nullid
1186 1186 elif fparent2 != nullid:
1187 1187 # is one parent an ancestor of the other?
1188 1188 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1189 1189 if fparent1 in fparentancestors:
1190 1190 fparent1, fparent2 = fparent2, nullid
1191 1191 elif fparent2 in fparentancestors:
1192 1192 fparent2 = nullid
1193 1193
1194 1194 # is the file changed?
1195 1195 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1196 1196 changelist.append(fname)
1197 1197 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1198 1198
1199 1199 # are just the flags changed during merge?
1200 1200 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1201 1201 changelist.append(fname)
1202 1202
1203 1203 return fparent1
1204 1204
1205 1205 @unfilteredmethod
1206 1206 def commit(self, text="", user=None, date=None, match=None, force=False,
1207 1207 editor=False, extra={}):
1208 1208 """Add a new revision to current repository.
1209 1209
1210 1210 Revision information is gathered from the working directory,
1211 1211 match can be used to filter the committed files. If editor is
1212 1212 supplied, it is called to get a commit message.
1213 1213 """
1214 1214
1215 1215 def fail(f, msg):
1216 1216 raise util.Abort('%s: %s' % (f, msg))
1217 1217
1218 1218 if not match:
1219 1219 match = matchmod.always(self.root, '')
1220 1220
1221 1221 if not force:
1222 1222 vdirs = []
1223 1223 match.explicitdir = vdirs.append
1224 1224 match.bad = fail
1225 1225
1226 1226 wlock = self.wlock()
1227 1227 try:
1228 1228 wctx = self[None]
1229 1229 merge = len(wctx.parents()) > 1
1230 1230
1231 1231 if (not force and merge and match and
1232 1232 (match.files() or match.anypats())):
1233 1233 raise util.Abort(_('cannot partially commit a merge '
1234 1234 '(do not specify files or patterns)'))
1235 1235
1236 1236 changes = self.status(match=match, clean=force)
1237 1237 if force:
1238 1238 changes[0].extend(changes[6]) # mq may commit unchanged files
1239 1239
1240 1240 # check subrepos
1241 1241 subs = []
1242 1242 commitsubs = set()
1243 1243 newstate = wctx.substate.copy()
1244 1244 # only manage subrepos and .hgsubstate if .hgsub is present
1245 1245 if '.hgsub' in wctx:
1246 1246 # we'll decide whether to track this ourselves, thanks
1247 1247 for c in changes[:3]:
1248 1248 if '.hgsubstate' in c:
1249 1249 c.remove('.hgsubstate')
1250 1250
1251 1251 # compare current state to last committed state
1252 1252 # build new substate based on last committed state
1253 1253 oldstate = wctx.p1().substate
1254 1254 for s in sorted(newstate.keys()):
1255 1255 if not match(s):
1256 1256 # ignore working copy, use old state if present
1257 1257 if s in oldstate:
1258 1258 newstate[s] = oldstate[s]
1259 1259 continue
1260 1260 if not force:
1261 1261 raise util.Abort(
1262 1262 _("commit with new subrepo %s excluded") % s)
1263 1263 if wctx.sub(s).dirty(True):
1264 1264 if not self.ui.configbool('ui', 'commitsubrepos'):
1265 1265 raise util.Abort(
1266 1266 _("uncommitted changes in subrepo %s") % s,
1267 1267 hint=_("use --subrepos for recursive commit"))
1268 1268 subs.append(s)
1269 1269 commitsubs.add(s)
1270 1270 else:
1271 1271 bs = wctx.sub(s).basestate()
1272 1272 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1273 1273 if oldstate.get(s, (None, None, None))[1] != bs:
1274 1274 subs.append(s)
1275 1275
1276 1276 # check for removed subrepos
1277 1277 for p in wctx.parents():
1278 1278 r = [s for s in p.substate if s not in newstate]
1279 1279 subs += [s for s in r if match(s)]
1280 1280 if subs:
1281 1281 if (not match('.hgsub') and
1282 1282 '.hgsub' in (wctx.modified() + wctx.added())):
1283 1283 raise util.Abort(
1284 1284 _("can't commit subrepos without .hgsub"))
1285 1285 changes[0].insert(0, '.hgsubstate')
1286 1286
1287 1287 elif '.hgsub' in changes[2]:
1288 1288 # clean up .hgsubstate when .hgsub is removed
1289 1289 if ('.hgsubstate' in wctx and
1290 1290 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1291 1291 changes[2].insert(0, '.hgsubstate')
1292 1292
1293 1293 # make sure all explicit patterns are matched
1294 1294 if not force and match.files():
1295 1295 matched = set(changes[0] + changes[1] + changes[2])
1296 1296
1297 1297 for f in match.files():
1298 1298 f = self.dirstate.normalize(f)
1299 1299 if f == '.' or f in matched or f in wctx.substate:
1300 1300 continue
1301 1301 if f in changes[3]: # missing
1302 1302 fail(f, _('file not found!'))
1303 1303 if f in vdirs: # visited directory
1304 1304 d = f + '/'
1305 1305 for mf in matched:
1306 1306 if mf.startswith(d):
1307 1307 break
1308 1308 else:
1309 1309 fail(f, _("no match under directory!"))
1310 1310 elif f not in self.dirstate:
1311 1311 fail(f, _("file not tracked!"))
1312 1312
1313 1313 cctx = context.workingctx(self, text, user, date, extra, changes)
1314 1314
1315 1315 if (not force and not extra.get("close") and not merge
1316 1316 and not cctx.files()
1317 1317 and wctx.branch() == wctx.p1().branch()):
1318 1318 return None
1319 1319
1320 1320 if merge and cctx.deleted():
1321 1321 raise util.Abort(_("cannot commit merge with missing files"))
1322 1322
1323 1323 ms = mergemod.mergestate(self)
1324 1324 for f in changes[0]:
1325 1325 if f in ms and ms[f] == 'u':
1326 1326 raise util.Abort(_("unresolved merge conflicts "
1327 1327 "(see hg help resolve)"))
1328 1328
1329 1329 if editor:
1330 1330 cctx._text = editor(self, cctx, subs)
1331 1331 edited = (text != cctx._text)
1332 1332
1333 1333 # Save commit message in case this transaction gets rolled back
1334 1334 # (e.g. by a pretxncommit hook). Leave the content alone on
1335 1335 # the assumption that the user will use the same editor again.
1336 1336 msgfn = self.savecommitmessage(cctx._text)
1337 1337
1338 1338 # commit subs and write new state
1339 1339 if subs:
1340 1340 for s in sorted(commitsubs):
1341 1341 sub = wctx.sub(s)
1342 1342 self.ui.status(_('committing subrepository %s\n') %
1343 1343 subrepo.subrelpath(sub))
1344 1344 sr = sub.commit(cctx._text, user, date)
1345 1345 newstate[s] = (newstate[s][0], sr)
1346 1346 subrepo.writestate(self, newstate)
1347 1347
1348 1348 p1, p2 = self.dirstate.parents()
1349 1349 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1350 1350 try:
1351 1351 self.hook("precommit", throw=True, parent1=hookp1,
1352 1352 parent2=hookp2)
1353 1353 ret = self.commitctx(cctx, True)
1354 1354 except: # re-raises
1355 1355 if edited:
1356 1356 self.ui.write(
1357 1357 _('note: commit message saved in %s\n') % msgfn)
1358 1358 raise
1359 1359
1360 1360 # update bookmarks, dirstate and mergestate
1361 1361 bookmarks.update(self, [p1, p2], ret)
1362 1362 cctx.markcommitted(ret)
1363 1363 ms.reset()
1364 1364 finally:
1365 1365 wlock.release()
1366 1366
1367 1367 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1368 1368 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1369 1369 self._afterlock(commithook)
1370 1370 return ret
1371 1371
1372 1372 @unfilteredmethod
1373 1373 def commitctx(self, ctx, error=False):
1374 1374 """Add a new revision to current repository.
1375 1375 Revision information is passed via the context argument.
1376 1376 """
1377 1377
1378 1378 tr = lock = None
1379 1379 removed = list(ctx.removed())
1380 1380 p1, p2 = ctx.p1(), ctx.p2()
1381 1381 user = ctx.user()
1382 1382
1383 1383 lock = self.lock()
1384 1384 try:
1385 1385 tr = self.transaction("commit")
1386 1386 trp = weakref.proxy(tr)
1387 1387
1388 1388 if ctx.files():
1389 1389 m1 = p1.manifest().copy()
1390 1390 m2 = p2.manifest()
1391 1391
1392 1392 # check in files
1393 1393 new = {}
1394 1394 changed = []
1395 1395 linkrev = len(self)
1396 1396 for f in sorted(ctx.modified() + ctx.added()):
1397 1397 self.ui.note(f + "\n")
1398 1398 try:
1399 1399 fctx = ctx[f]
1400 1400 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1401 1401 changed)
1402 1402 m1.set(f, fctx.flags())
1403 1403 except OSError, inst:
1404 1404 self.ui.warn(_("trouble committing %s!\n") % f)
1405 1405 raise
1406 1406 except IOError, inst:
1407 1407 errcode = getattr(inst, 'errno', errno.ENOENT)
1408 1408 if error or errcode and errcode != errno.ENOENT:
1409 1409 self.ui.warn(_("trouble committing %s!\n") % f)
1410 1410 raise
1411 1411 else:
1412 1412 removed.append(f)
1413 1413
1414 1414 # update manifest
1415 1415 m1.update(new)
1416 1416 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1417 1417 drop = [f for f in removed if f in m1]
1418 1418 for f in drop:
1419 1419 del m1[f]
1420 1420 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1421 1421 p2.manifestnode(), (new, drop))
1422 1422 files = changed + removed
1423 1423 else:
1424 1424 mn = p1.manifestnode()
1425 1425 files = []
1426 1426
1427 1427 # update changelog
1428 1428 self.changelog.delayupdate()
1429 1429 n = self.changelog.add(mn, files, ctx.description(),
1430 1430 trp, p1.node(), p2.node(),
1431 1431 user, ctx.date(), ctx.extra().copy())
1432 1432 p = lambda: self.changelog.writepending() and self.root or ""
1433 1433 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1434 1434 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1435 1435 parent2=xp2, pending=p)
1436 1436 self.changelog.finalize(trp)
1437 1437 # set the new commit to its proper phase
1438 1438 targetphase = subrepo.newcommitphase(self.ui, ctx)
1439 1439 if targetphase:
1440 1440 # retractboundary doesn't alter parent changesets.
1441 1441 # if a parent has a higher phase, the resulting phase will
1442 1442 # be compliant anyway
1443 1443 #
1444 1444 # if the minimal phase was 0 we don't need to retract anything
1445 phases.retractboundary(self, targetphase, [n])
1445 phases.retractboundary(self, tr, targetphase, [n])
1446 1446 tr.close()
1447 1447 branchmap.updatecache(self.filtered('served'))
1448 1448 return n
1449 1449 finally:
1450 1450 if tr:
1451 1451 tr.release()
1452 1452 lock.release()
1453 1453
1454 1454 @unfilteredmethod
1455 1455 def destroying(self):
1456 1456 '''Inform the repository that nodes are about to be destroyed.
1457 1457 Intended for use by strip and rollback, so there's a common
1458 1458 place for anything that has to be done before destroying history.
1459 1459
1460 1460 This is mostly useful for saving state that is in memory and waiting
1461 1461 to be flushed when the current lock is released. Because a call to
1462 1462 destroyed is imminent, the repo will be invalidated causing those
1463 1463 changes to stay in memory (waiting for the next unlock), or vanish
1464 1464 completely.
1465 1465 '''
1466 1466 # When using the same lock to commit and strip, the phasecache is left
1467 1467 # dirty after committing. Then when we strip, the repo is invalidated,
1468 1468 # causing those changes to disappear.
1469 1469 if '_phasecache' in vars(self):
1470 1470 self._phasecache.write()
1471 1471
1472 1472 @unfilteredmethod
1473 1473 def destroyed(self):
1474 1474 '''Inform the repository that nodes have been destroyed.
1475 1475 Intended for use by strip and rollback, so there's a common
1476 1476 place for anything that has to be done after destroying history.
1477 1477 '''
1478 1478 # When one tries to:
1479 1479 # 1) destroy nodes thus calling this method (e.g. strip)
1480 1480 # 2) use phasecache somewhere (e.g. commit)
1481 1481 #
1482 1482 # then 2) will fail because the phasecache contains nodes that were
1483 1483 # removed. We can either remove phasecache from the filecache,
1484 1484 # causing it to reload next time it is accessed, or simply filter
1485 1485 # the removed nodes now and write the updated cache.
1486 1486 self._phasecache.filterunknown(self)
1487 1487 self._phasecache.write()
1488 1488
1489 1489 # update the 'served' branch cache to help read only server process
1490 1490 # Thanks to branchcache collaboration this is done from the nearest
1491 1491 # filtered subset and it is expected to be fast.
1492 1492 branchmap.updatecache(self.filtered('served'))
1493 1493
1494 1494 # Ensure the persistent tag cache is updated. Doing it now
1495 1495 # means that the tag cache only has to worry about destroyed
1496 1496 # heads immediately after a strip/rollback. That in turn
1497 1497 # guarantees that "cachetip == currenttip" (comparing both rev
1498 1498 # and node) always means no nodes have been added or destroyed.
1499 1499
1500 1500 # XXX this is suboptimal when qrefresh'ing: we strip the current
1501 1501 # head, refresh the tag cache, then immediately add a new head.
1502 1502 # But I think doing it this way is necessary for the "instant
1503 1503 # tag cache retrieval" case to work.
1504 1504 self.invalidate()
1505 1505
1506 1506 def walk(self, match, node=None):
1507 1507 '''
1508 1508 walk recursively through the directory tree or a given
1509 1509 changeset, finding all files matched by the match
1510 1510 function
1511 1511 '''
1512 1512 return self[node].walk(match)
1513 1513
1514 1514 def status(self, node1='.', node2=None, match=None,
1515 1515 ignored=False, clean=False, unknown=False,
1516 1516 listsubrepos=False):
1517 1517 '''a convenience method that calls node1.status(node2)'''
1518 1518 return self[node1].status(node2, match, ignored, clean, unknown,
1519 1519 listsubrepos)
1520 1520
1521 1521 def heads(self, start=None):
1522 1522 heads = self.changelog.heads(start)
1523 1523 # sort the output in rev descending order
1524 1524 return sorted(heads, key=self.changelog.rev, reverse=True)
1525 1525
1526 1526 def branchheads(self, branch=None, start=None, closed=False):
1527 1527 '''return a (possibly filtered) list of heads for the given branch
1528 1528
1529 1529 Heads are returned in topological order, from newest to oldest.
1530 1530 If branch is None, use the dirstate branch.
1531 1531 If start is not None, return only heads reachable from start.
1532 1532 If closed is True, return heads that are marked as closed as well.
1533 1533 '''
1534 1534 if branch is None:
1535 1535 branch = self[None].branch()
1536 1536 branches = self.branchmap()
1537 1537 if branch not in branches:
1538 1538 return []
1539 1539 # the cache returns heads ordered lowest to highest
1540 1540 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1541 1541 if start is not None:
1542 1542 # filter out the heads that cannot be reached from startrev
1543 1543 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1544 1544 bheads = [h for h in bheads if h in fbheads]
1545 1545 return bheads
1546 1546
1547 1547 def branches(self, nodes):
1548 1548 if not nodes:
1549 1549 nodes = [self.changelog.tip()]
1550 1550 b = []
1551 1551 for n in nodes:
1552 1552 t = n
1553 1553 while True:
1554 1554 p = self.changelog.parents(n)
1555 1555 if p[1] != nullid or p[0] == nullid:
1556 1556 b.append((t, n, p[0], p[1]))
1557 1557 break
1558 1558 n = p[0]
1559 1559 return b
1560 1560
1561 1561 def between(self, pairs):
1562 1562 r = []
1563 1563
1564 1564 for top, bottom in pairs:
1565 1565 n, l, i = top, [], 0
1566 1566 f = 1
1567 1567
1568 1568 while n != bottom and n != nullid:
1569 1569 p = self.changelog.parents(n)[0]
1570 1570 if i == f:
1571 1571 l.append(n)
1572 1572 f = f * 2
1573 1573 n = p
1574 1574 i += 1
1575 1575
1576 1576 r.append(l)
1577 1577
1578 1578 return r
1579 1579
1580 1580 def pull(self, remote, heads=None, force=False):
1581 1581 return exchange.pull(self, remote, heads, force)
1582 1582
1583 1583 def checkpush(self, pushop):
1584 1584 """Extensions can override this function if additional checks have
1585 1585 to be performed before pushing, or call it if they override push
1586 1586 command.
1587 1587 """
1588 1588 pass
1589 1589
1590 1590 @unfilteredpropertycache
1591 1591 def prepushoutgoinghooks(self):
1592 1592 """Return a util.hooks object consisting of "(repo, remote, outgoing)"
1593 1593 functions, which are called before pushing changesets.
1594 1594 """
1595 1595 return util.hooks()
1596 1596
1597 1597 def push(self, remote, force=False, revs=None, newbranch=False):
1598 1598 return exchange.push(self, remote, force, revs, newbranch)
1599 1599
1600 1600 def stream_in(self, remote, requirements):
1601 1601 lock = self.lock()
1602 1602 try:
1603 1603 # Save remote branchmap. We will use it later
1604 1604 # to speed up branchcache creation
1605 1605 rbranchmap = None
1606 1606 if remote.capable("branchmap"):
1607 1607 rbranchmap = remote.branchmap()
1608 1608
1609 1609 fp = remote.stream_out()
1610 1610 l = fp.readline()
1611 1611 try:
1612 1612 resp = int(l)
1613 1613 except ValueError:
1614 1614 raise error.ResponseError(
1615 1615 _('unexpected response from remote server:'), l)
1616 1616 if resp == 1:
1617 1617 raise util.Abort(_('operation forbidden by server'))
1618 1618 elif resp == 2:
1619 1619 raise util.Abort(_('locking the remote repository failed'))
1620 1620 elif resp != 0:
1621 1621 raise util.Abort(_('the server sent an unknown error code'))
1622 1622 self.ui.status(_('streaming all changes\n'))
1623 1623 l = fp.readline()
1624 1624 try:
1625 1625 total_files, total_bytes = map(int, l.split(' ', 1))
1626 1626 except (ValueError, TypeError):
1627 1627 raise error.ResponseError(
1628 1628 _('unexpected response from remote server:'), l)
1629 1629 self.ui.status(_('%d files to transfer, %s of data\n') %
1630 1630 (total_files, util.bytecount(total_bytes)))
1631 1631 handled_bytes = 0
1632 1632 self.ui.progress(_('clone'), 0, total=total_bytes)
1633 1633 start = time.time()
1634 1634
1635 1635 tr = self.transaction(_('clone'))
1636 1636 try:
1637 1637 for i in xrange(total_files):
1638 1638 # XXX doesn't support '\n' or '\r' in filenames
1639 1639 l = fp.readline()
1640 1640 try:
1641 1641 name, size = l.split('\0', 1)
1642 1642 size = int(size)
1643 1643 except (ValueError, TypeError):
1644 1644 raise error.ResponseError(
1645 1645 _('unexpected response from remote server:'), l)
1646 1646 if self.ui.debugflag:
1647 1647 self.ui.debug('adding %s (%s)\n' %
1648 1648 (name, util.bytecount(size)))
1649 1649 # for backwards compat, name was partially encoded
1650 1650 ofp = self.sopener(store.decodedir(name), 'w')
1651 1651 for chunk in util.filechunkiter(fp, limit=size):
1652 1652 handled_bytes += len(chunk)
1653 1653 self.ui.progress(_('clone'), handled_bytes,
1654 1654 total=total_bytes)
1655 1655 ofp.write(chunk)
1656 1656 ofp.close()
1657 1657 tr.close()
1658 1658 finally:
1659 1659 tr.release()
1660 1660
1661 1661 # Writing straight to files circumvented the in-memory caches
1662 1662 self.invalidate()
1663 1663
1664 1664 elapsed = time.time() - start
1665 1665 if elapsed <= 0:
1666 1666 elapsed = 0.001
1667 1667 self.ui.progress(_('clone'), None)
1668 1668 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1669 1669 (util.bytecount(total_bytes), elapsed,
1670 1670 util.bytecount(total_bytes / elapsed)))
1671 1671
1672 1672 # new requirements = old non-format requirements +
1673 1673 # new format-related requirements
1674 1674 # from the streamed-in repository
1675 1675 requirements.update(set(self.requirements) - self.supportedformats)
1676 1676 self._applyrequirements(requirements)
1677 1677 self._writerequirements()
1678 1678
1679 1679 if rbranchmap:
1680 1680 rbheads = []
1681 1681 for bheads in rbranchmap.itervalues():
1682 1682 rbheads.extend(bheads)
1683 1683
1684 1684 if rbheads:
1685 1685 rtiprev = max((int(self.changelog.rev(node))
1686 1686 for node in rbheads))
1687 1687 cache = branchmap.branchcache(rbranchmap,
1688 1688 self[rtiprev].node(),
1689 1689 rtiprev)
1690 1690 # Try to stick it as low as possible
1691 1691 # filters above 'served' are unlikely to be fetched from a clone
1692 1692 for candidate in ('base', 'immutable', 'served'):
1693 1693 rview = self.filtered(candidate)
1694 1694 if cache.validfor(rview):
1695 1695 self._branchcaches[candidate] = cache
1696 1696 cache.write(rview)
1697 1697 break
1698 1698 self.invalidate()
1699 1699 return len(self.heads()) + 1
1700 1700 finally:
1701 1701 lock.release()
1702 1702
1703 1703 def clone(self, remote, heads=[], stream=False):
1704 1704 '''clone remote repository.
1705 1705
1706 1706 keyword arguments:
1707 1707 heads: list of revs to clone (forces use of pull)
1708 1708 stream: use streaming clone if possible'''
1709 1709
1710 1710 # now, all clients that can request uncompressed clones can
1711 1711 # read repo formats supported by all servers that can serve
1712 1712 # them.
1713 1713
1714 1714 # if revlog format changes, client will have to check version
1715 1715 # and format flags on "stream" capability, and use
1716 1716 # uncompressed only if compatible.
1717 1717
1718 1718 if not stream:
1719 1719 # if the server explicitly prefers to stream (for fast LANs)
1720 1720 stream = remote.capable('stream-preferred')
1721 1721
1722 1722 if stream and not heads:
1723 1723 # 'stream' means remote revlog format is revlogv1 only
1724 1724 if remote.capable('stream'):
1725 1725 return self.stream_in(remote, set(('revlogv1',)))
1726 1726 # otherwise, 'streamreqs' contains the remote revlog format
1727 1727 streamreqs = remote.capable('streamreqs')
1728 1728 if streamreqs:
1729 1729 streamreqs = set(streamreqs.split(','))
1730 1730 # if we support it, stream in and adjust our requirements
1731 1731 if not streamreqs - self.supportedformats:
1732 1732 return self.stream_in(remote, streamreqs)
1733 1733 return self.pull(remote, heads)
1734 1734
1735 1735 def pushkey(self, namespace, key, old, new):
1736 1736 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1737 1737 old=old, new=new)
1738 1738 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1739 1739 ret = pushkey.push(self, namespace, key, old, new)
1740 1740 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1741 1741 ret=ret)
1742 1742 return ret
1743 1743
1744 1744 def listkeys(self, namespace):
1745 1745 self.hook('prelistkeys', throw=True, namespace=namespace)
1746 1746 self.ui.debug('listing keys for "%s"\n' % namespace)
1747 1747 values = pushkey.list(self, namespace)
1748 1748 self.hook('listkeys', namespace=namespace, values=values)
1749 1749 return values
1750 1750
1751 1751 def debugwireargs(self, one, two, three=None, four=None, five=None):
1752 1752 '''used to test argument passing over the wire'''
1753 1753 return "%s %s %s %s %s" % (one, two, three, four, five)
1754 1754
1755 1755 def savecommitmessage(self, text):
1756 1756 fp = self.opener('last-message.txt', 'wb')
1757 1757 try:
1758 1758 fp.write(text)
1759 1759 finally:
1760 1760 fp.close()
1761 1761 return self.pathto(fp.name[len(self.root) + 1:])
1762 1762
1763 1763 # used to avoid circular references so destructors work
1764 1764 def aftertrans(files):
1765 1765 renamefiles = [tuple(t) for t in files]
1766 1766 def a():
1767 1767 for vfs, src, dest in renamefiles:
1768 1768 try:
1769 1769 vfs.rename(src, dest)
1770 1770 except OSError: # journal file does not yet exist
1771 1771 pass
1772 1772 return a
1773 1773
1774 1774 def undoname(fn):
1775 1775 base, name = os.path.split(fn)
1776 1776 assert name.startswith('journal')
1777 1777 return os.path.join(base, name.replace('journal', 'undo', 1))
1778 1778
1779 1779 def instance(ui, path, create):
1780 1780 return localrepository(ui, util.urllocalpath(path), create)
1781 1781
1782 1782 def islocal(path):
1783 1783 return True
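The next hunk, in the phases module, carries the signature change itself: both phasecache.retractboundary and the module-level retractboundary gain a transaction argument. For callers tracking this API, a minimal caller-side sketch follows; the lock/transaction scaffolding, the transaction description, and the 'node' variable are illustrative assumptions, not part of this diff:

    # Sketch: phases.retractboundary now takes the active transaction as
    # its second argument (unused at this revision, but threaded through
    # in preparation for transactional phase movement).
    lock = repo.lock()
    try:
        tr = repo.transaction('retract-example')  # hypothetical description
        try:
            # move 'node' (and its descendants) back to at least draft
            phases.retractboundary(repo, tr, phases.draft, [node])
            tr.close()
        finally:
            tr.release()
    finally:
        lock.release()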
@@ -1,415 +1,415 b''
1 1 """ Mercurial phases support code
2 2
3 3 ---
4 4
5 5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 6 Logilab SA <contact@logilab.fr>
7 7 Augie Fackler <durin42@gmail.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License version 2 or any later version.
11 11
12 12 ---
13 13
14 14 This module implements most phase logic in mercurial.
15 15
16 16
17 17 Basic Concept
18 18 =============
19 19
20 20 A 'changeset phase' is an indicator that tells us how a changeset is
21 21 manipulated and communicated. The details of each phase is described
22 22 below, here we describe the properties they have in common.
23 23
24 24 Like bookmarks, phases are not stored in history and thus are not
25 25 permanent and leave no audit trail.
26 26
27 27 First, no changeset can be in two phases at once. Phases are ordered,
28 28 so they can be considered from lowest to highest. The default, lowest
29 29 phase is 'public' - this is the normal phase of existing changesets. A
30 30 child changeset cannot be in a lower phase than its parents.
31 31
32 32 These phases share a hierarchy of traits:
33 33
34 34 immutable shared
35 35 public: X X
36 36 draft: X
37 37 secret:
38 38
39 39 Local commits are draft by default.
40 40
41 41 Phase Movement and Exchange
42 42 ===========================
43 43
44 44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 45 a publish option set; we call such a server a "publishing server".
46 46 Pushing a draft changeset to a publishing server changes the phase to
47 47 public.
48 48
49 49 A small list of facts/rules defines the exchange of phases:
50 50
51 51 * an old client never changes server states
52 52 * pull never changes server states
53 53 * changesets on publishing and old servers are seen as public by the client
54 54 * any secret changeset seen in another repository is lowered to at
55 55 least draft
56 56
57 57 Here is the final table summing up the 49 possible use cases of phase
58 58 exchange:
59 59
60 60 server
61 61 old publish non-publish
62 62 N X N D P N D P
63 63 old client
64 64 pull
65 65 N - X/X - X/D X/P - X/D X/P
66 66 X - X/X - X/D X/P - X/D X/P
67 67 push
68 68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 69 new client
70 70 pull
71 71 N - P/X - P/D P/P - D/D P/P
72 72 D - P/X - P/D P/P - D/D P/P
73 73 P - P/X - P/D P/P - P/D P/P
74 74 push
75 75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 76 P P/X P/X P/P P/P P/P P/P P/P P/P
77 77
78 78 Legend:
79 79
80 80 A/B = final state on client / state on server
81 81
82 82 * N = new/not present,
83 83 * P = public,
84 84 * D = draft,
85 85 * X = not tracked (i.e., the old client or server has no internal
86 86 way of recording the phase.)
87 87
88 88 passive = only pushes
89 89
90 90
91 91 A cell here can be read like this:
92 92
93 93 "When a new client pushes a draft changeset (D) to a publishing
94 94 server where it's not present (N), it's marked public on both
95 95 sides (P/P)."
96 96
97 97 Note: old clients behave as publishing servers with draft-only content
98 98 - other people see it as public
99 99 - content is pushed as draft
100 100
101 101 """
102 102
103 103 import errno
104 104 from node import nullid, nullrev, bin, hex, short
105 105 from i18n import _
106 106 import util, error
107 107
108 108 allphases = public, draft, secret = range(3)
109 109 trackedphases = allphases[1:]
110 110 phasenames = ['public', 'draft', 'secret']
111 111
112 112 def _readroots(repo, phasedefaults=None):
113 113 """Read phase roots from disk
114 114
115 115 phasedefaults is a list of fn(repo, roots) callables, which are
116 116 executed if the phase roots file does not exist. When phases are
117 117 being initialized on an existing repository, this could be used to
118 118 set selected changesets' phase to something other than public.
119 119
120 120 Return (roots, dirty) where dirty is true if roots differ from
121 121 what is being stored.
122 122 """
123 123 repo = repo.unfiltered()
124 124 dirty = False
125 125 roots = [set() for i in allphases]
126 126 try:
127 127 f = repo.sopener('phaseroots')
128 128 try:
129 129 for line in f:
130 130 phase, nh = line.split()
131 131 roots[int(phase)].add(bin(nh))
132 132 finally:
133 133 f.close()
134 134 except IOError, inst:
135 135 if inst.errno != errno.ENOENT:
136 136 raise
137 137 if phasedefaults:
138 138 for f in phasedefaults:
139 139 roots = f(repo, roots)
140 140 dirty = True
141 141 return roots, dirty
142 142
143 143 class phasecache(object):
144 144 def __init__(self, repo, phasedefaults, _load=True):
145 145 if _load:
146 146 # Cheap trick to allow shallow-copy without copy module
147 147 self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
148 148 self._phaserevs = None
149 149 self.filterunknown(repo)
150 150 self.opener = repo.sopener
151 151
152 152 def copy(self):
153 153 # Shallow copy meant to ensure isolation in
154 154 # advance/retractboundary(), nothing more.
155 155 ph = phasecache(None, None, _load=False)
156 156 ph.phaseroots = self.phaseroots[:]
157 157 ph.dirty = self.dirty
158 158 ph.opener = self.opener
159 159 ph._phaserevs = self._phaserevs
160 160 return ph
161 161
162 162 def replace(self, phcache):
163 163 for a in 'phaseroots dirty opener _phaserevs'.split():
164 164 setattr(self, a, getattr(phcache, a))
165 165
166 166 def getphaserevs(self, repo, rebuild=False):
167 167 if rebuild or self._phaserevs is None:
168 168 repo = repo.unfiltered()
169 169 revs = [public] * len(repo.changelog)
170 170 for phase in trackedphases:
171 171 roots = map(repo.changelog.rev, self.phaseroots[phase])
172 172 if roots:
173 173 for rev in roots:
174 174 revs[rev] = phase
175 175 for rev in repo.changelog.descendants(roots):
176 176 revs[rev] = phase
177 177 self._phaserevs = revs
178 178 return self._phaserevs
179 179
180 180 def phase(self, repo, rev):
181 181 # We need a repo argument here to be able to build _phaserevs
182 182 # if necessary. The repository instance is not stored in
183 183 # phasecache to avoid reference cycles. The changelog instance
184 184 # is not stored because it is a filecache() property and can
185 185 # be replaced without us being notified.
186 186 if rev == nullrev:
187 187 return public
188 188 if rev < nullrev:
189 189 raise ValueError(_('cannot lookup negative revision'))
190 190 if self._phaserevs is None or rev >= len(self._phaserevs):
191 191 self._phaserevs = self.getphaserevs(repo, rebuild=True)
192 192 return self._phaserevs[rev]
193 193
194 194 def write(self):
195 195 if not self.dirty:
196 196 return
197 197 f = self.opener('phaseroots', 'w', atomictemp=True)
198 198 try:
199 199 for phase, roots in enumerate(self.phaseroots):
200 200 for h in roots:
201 201 f.write('%i %s\n' % (phase, hex(h)))
202 202 finally:
203 203 f.close()
204 204 self.dirty = False
205 205
206 206 def _updateroots(self, phase, newroots):
207 207 self.phaseroots[phase] = newroots
208 208 self._phaserevs = None
209 209 self.dirty = True
210 210
211 211 def advanceboundary(self, repo, tr, targetphase, nodes):
212 212 # Be careful to preserve shallow-copied values: do not update
213 213 # phaseroots values, replace them.
214 214
215 215 repo = repo.unfiltered()
216 216 delroots = [] # roots deleted by this operation
217 217 for phase in xrange(targetphase + 1, len(allphases)):
218 218 # filter nodes that are not in a compatible phase already
219 219 nodes = [n for n in nodes
220 220 if self.phase(repo, repo[n].rev()) >= phase]
221 221 if not nodes:
222 222 break # no roots to move anymore
223 223 olds = self.phaseroots[phase]
224 224 roots = set(ctx.node() for ctx in repo.set(
225 225 'roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
226 226 if olds != roots:
227 227 self._updateroots(phase, roots)
228 228 # some roots may need to be declared for lower phases
229 229 delroots.extend(olds - roots)
230 230 # declare deleted roots in the target phase
231 231 if targetphase != 0:
232 self.retractboundary(repo, targetphase, delroots)
232 self.retractboundary(repo, tr, targetphase, delroots)
233 233 repo.invalidatevolatilesets()
234 234
235 def retractboundary(self, repo, targetphase, nodes):
235 def retractboundary(self, repo, tr, targetphase, nodes):
236 236 # Be careful to preserve shallow-copied values: do not update
237 237 # phaseroots values, replace them.
238 238
239 239 repo = repo.unfiltered()
240 240 currentroots = self.phaseroots[targetphase]
241 241 newroots = [n for n in nodes
242 242 if self.phase(repo, repo[n].rev()) < targetphase]
243 243 if newroots:
244 244 if nullid in newroots:
245 245 raise util.Abort(_('cannot change null revision phase'))
246 246 currentroots = currentroots.copy()
247 247 currentroots.update(newroots)
248 248 ctxs = repo.set('roots(%ln::)', currentroots)
249 249 currentroots.intersection_update(ctx.node() for ctx in ctxs)
250 250 self._updateroots(targetphase, currentroots)
251 251 repo.invalidatevolatilesets()
252 252
253 253 def filterunknown(self, repo):
254 254 """remove unknown nodes from the phase boundary
255 255
256 256 Nothing is lost as unknown nodes only hold data for their descendants.
257 257 """
258 258 filtered = False
259 259 nodemap = repo.changelog.nodemap # to filter unknown nodes
260 260 for phase, nodes in enumerate(self.phaseroots):
261 261 missing = sorted(node for node in nodes if node not in nodemap)
262 262 if missing:
263 263 for mnode in missing:
264 264 repo.ui.debug(
265 265 'removing unknown node %s from %i-phase boundary\n'
266 266 % (short(mnode), phase))
267 267 nodes.symmetric_difference_update(missing)
268 268 filtered = True
269 269 if filtered:
270 270 self.dirty = True
271 271 # filterunknown is called by repo.destroyed; we may have no changes in
272 272 # roots, but the phaserevs contents are certainly invalid (or at least
273 273 # we have no proper way to check that). Related to issue 3858.
274 274 #
275 275 # The other caller is __init__, which has no _phaserevs initialized
276 276 # anyway. If this changes, we should consider adding a dedicated
277 277 # "destroyed" function to phasecache or a proper cache-key mechanism
278 278 # (see the branchmap one)
279 279 self._phaserevs = None
280 280
281 281 def advanceboundary(repo, tr, targetphase, nodes):
282 282 """Add nodes to a phase, changing other nodes' phases if necessary.
283 283
284 284 This function moves the boundary *forward*: all nodes are
285 285 set to the target phase or kept in a *lower* phase.
286 286
287 287 The boundary is simplified to contain phase roots only."""
288 288 phcache = repo._phasecache.copy()
289 289 phcache.advanceboundary(repo, tr, targetphase, nodes)
290 290 repo._phasecache.replace(phcache)
291 291
292 def retractboundary(repo, targetphase, nodes):
292 def retractboundary(repo, tr, targetphase, nodes):
293 293 """Set nodes back to a phase, changing other nodes' phases if
294 294 necessary.
295 295
296 296 This function moves the boundary *backward*: all nodes are
297 297 set to the target phase or kept in a *higher* phase.
298 298
299 299 The boundary is simplified to contain phase roots only."""
300 300 phcache = repo._phasecache.copy()
301 phcache.retractboundary(repo, targetphase, nodes)
301 phcache.retractboundary(repo, tr, targetphase, nodes)
302 302 repo._phasecache.replace(phcache)
303 303
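Both module-level wrappers above follow the same copy-then-swap pattern: they mutate a shallow copy of the phasecache and install it with replace() only after the update has fully succeeded, so an exception mid-update cannot leave repo._phasecache half-mutated. A schematic of the pattern (the helper name is hypothetical, not part of this module):

    # Schematic of the isolation pattern shared by advanceboundary and
    # retractboundary above.
    def _withphasecopy(repo, mutator):
        phcache = repo._phasecache.copy()  # isolated shallow copy
        mutator(phcache)                   # may raise; original stays intact
        repo._phasecache.replace(phcache)  # swap in only on success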
304 304 def listphases(repo):
305 305 """List phases root for serialization over pushkey"""
306 306 keys = {}
307 307 value = '%i' % draft
308 308 for root in repo._phasecache.phaseroots[draft]:
309 309 keys[hex(root)] = value
310 310
311 311 if repo.ui.configbool('phases', 'publish', True):
312 312 # Add an extra data entry to let the remote know we are a publishing
313 313 # repo. Publishing repos can't just pretend they are old repos.
314 314 # When pushing to a publishing repo, the client still needs to
315 315 # push the phase boundary.
316 316 #
317 317 # A push does not only push changesets. It also pushes phase data.
318 318 # New phase data may apply to common changesets which won't be
319 319 # pushed (as they are common). Here is a very simple example:
320 320 #
321 321 # 1) repo A pushes changeset X as draft to repo B
322 322 # 2) repo B makes changeset X public
323 323 # 3) repo B pushes to repo A. X is not pushed, but the fact that
324 324 # X is now public should be
325 325 #
326 326 # The server can't handle this on its own as it has no idea of
327 327 # the client's phase data.
328 328 keys['publishing'] = 'True'
329 329 return keys
330 330
331 331 def pushphase(repo, nhex, oldphasestr, newphasestr):
332 332 """Update the phase of a single node received over pushkey"""
333 333 repo = repo.unfiltered()
334 334 tr = None
335 335 lock = repo.lock()
336 336 try:
337 337 currentphase = repo[nhex].phase()
338 338 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
339 339 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
340 340 if currentphase == oldphase and newphase < oldphase:
341 341 tr = repo.transaction('pushkey-phase')
342 342 advanceboundary(repo, tr, newphase, [bin(nhex)])
343 343 tr.close()
344 344 return 1
345 345 elif currentphase == newphase:
346 346 # raced, but got correct result
347 347 return 1
348 348 else:
349 349 return 0
350 350 finally:
351 351 if tr:
352 352 tr.release()
353 353 lock.release()
354 354
355 355 def analyzeremotephases(repo, subset, roots):
356 356 """Compute phase heads and roots in a subset of nodes from a root dict
357 357
358 358 * subset is the heads of the subset
359 359 * roots is a {<nodeid> => phase} mapping. key and value are strings.
360 360
361 361 Accepts unknown elements in the input
362 362 """
363 363 repo = repo.unfiltered()
364 364 # build list from dictionary
365 365 draftroots = []
366 366 nodemap = repo.changelog.nodemap # to filter unknown nodes
367 367 for nhex, phase in roots.iteritems():
368 368 if nhex == 'publishing': # ignore data related to publish option
369 369 continue
370 370 node = bin(nhex)
371 371 phase = int(phase)
372 372 if phase == 0:
373 373 if node != nullid:
374 374 repo.ui.warn(_('ignoring inconsistent public root'
375 375 ' from remote: %s\n') % nhex)
376 376 elif phase == 1:
377 377 if node in nodemap:
378 378 draftroots.append(node)
379 379 else:
380 380 repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
381 381 % (phase, nhex))
382 382 # compute heads
383 383 publicheads = newheads(repo, subset, draftroots)
384 384 return publicheads, draftroots
385 385
386 386 def newheads(repo, heads, roots):
387 387 """compute the new heads of a subset minus another
388 388
389 389 * `heads`: defines the first subset
390 390 * `roots`: defines the second, which we subtract from the first"""
391 391 repo = repo.unfiltered()
392 392 revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
393 393 heads, roots, roots, heads)
394 394 return [c.node() for c in revset]
395 395
396 396
397 397 def newcommitphase(ui):
398 398 """helper to get the target phase of a new commit
399 399
400 400 Handles all possible values of the phases.new-commit option.
401 401
402 402 """
403 403 v = ui.config('phases', 'new-commit', draft)
404 404 try:
405 405 return phasenames.index(v)
406 406 except ValueError:
407 407 try:
408 408 return int(v)
409 409 except ValueError:
410 410 msg = _("phases.new-commit: not a valid phase name ('%s')")
411 411 raise error.ConfigError(msg % v)
412 412
413 413 def hassecret(repo):
414 414 """utility function that checks whether a repo has any secret changesets."""
415 415 return bool(repo._phasecache.phaseroots[2])