bundle2: return a stream from exchange.getbundle...
Pierre-Yves David
r21068:c15b66a6 default
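This changeset makes exchange.getbundle return the raw bundle2 stream (a util.chunkbuffer) when 'HG20' is in bundlecaps, instead of a parsed bundle2.unbundle20 object. Wire-protocol code can then send the stream as-is, while callers that need a parsed bundle, such as localpeer.getbundle in the localrepo.py hunk further below, rebuild the object themselves. A minimal caller-side sketch of that pattern, using the function names from this diff (the fetchbundle wrapper itself is hypothetical and only illustrative):

# Sketch only: consuming exchange.getbundle() after this change.
# The fetchbundle() wrapper is hypothetical; exchange.getbundle and
# bundle2.unbundle20 are the functions shown in the diff below.
import exchange, bundle2

def fetchbundle(repo, source, heads=None, common=None, bundlecaps=None):
    ret = exchange.getbundle(repo, source, heads=heads,
                             common=common, bundlecaps=bundlecaps)
    if bundlecaps is not None and 'HG20' in bundlecaps:
        # For bundle2 requests, getbundle now returns a stream; rebuild
        # the parsed object locally, as localpeer.getbundle does.
        ret = bundle2.unbundle20(repo.ui, ret)
    return ret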
exchange.py
@@ -1,708 +1,708
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno
11 11 import util, scmutil, changegroup, base85
12 12 import discovery, phases, obsolete, bookmarks, bundle2
13 13
14 14 def readbundle(ui, fh, fname, vfs=None):
15 15 header = changegroup.readexactly(fh, 4)
16 16
17 17 alg = None
18 18 if not fname:
19 19 fname = "stream"
20 20 if not header.startswith('HG') and header.startswith('\0'):
21 21 fh = changegroup.headerlessfixup(fh, header)
22 22 header = "HG10"
23 23 alg = 'UN'
24 24 elif vfs:
25 25 fname = vfs.join(fname)
26 26
27 27 magic, version = header[0:2], header[2:4]
28 28
29 29 if magic != 'HG':
30 30 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
31 31 if version == '10':
32 32 if alg is None:
33 33 alg = changegroup.readexactly(fh, 2)
34 34 return changegroup.unbundle10(fh, alg)
35 35 elif version == '20':
36 36 return bundle2.unbundle20(ui, fh, header=magic + version)
37 37 else:
38 38 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39 39
40 40
41 41 class pushoperation(object):
42 42 """A object that represent a single push operation
43 43
44 44 It purpose is to carry push related state and very common operation.
45 45
46 46 A new should be created at the beginning of each push and discarded
47 47 afterward.
48 48 """
49 49
50 50 def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
51 51 # repo we push from
52 52 self.repo = repo
53 53 self.ui = repo.ui
54 54 # repo we push to
55 55 self.remote = remote
56 56 # force option provided
57 57 self.force = force
58 58 # revs to be pushed (None is "all")
59 59 self.revs = revs
60 60 # allow push of new branch
61 61 self.newbranch = newbranch
62 62 # did a local lock get acquired?
63 63 self.locallocked = None
64 64 # Integer version of the push result
65 65 # - None means nothing to push
66 66 # - 0 means HTTP error
67 67 # - 1 means we pushed and remote head count is unchanged *or*
68 68 # we have outgoing changesets but refused to push
69 69 # - other values as described by addchangegroup()
70 70 self.ret = None
71 71 # discover.outgoing object (contains common and outgoing data)
72 72 self.outgoing = None
73 73 # all remote heads before the push
74 74 self.remoteheads = None
75 75 # testable as a boolean indicating if any nodes are missing locally.
76 76 self.incoming = None
77 77 # set of all heads common after changeset bundle push
78 78 self.commonheads = None
79 79
80 80 def push(repo, remote, force=False, revs=None, newbranch=False):
81 81 '''Push outgoing changesets (limited by revs) from a local
82 82 repository to remote. Return an integer:
83 83 - None means nothing to push
84 84 - 0 means HTTP error
85 85 - 1 means we pushed and remote head count is unchanged *or*
86 86 we have outgoing changesets but refused to push
87 87 - other values as described by addchangegroup()
88 88 '''
89 89 pushop = pushoperation(repo, remote, force, revs, newbranch)
90 90 if pushop.remote.local():
91 91 missing = (set(pushop.repo.requirements)
92 92 - pushop.remote.local().supported)
93 93 if missing:
94 94 msg = _("required features are not"
95 95 " supported in the destination:"
96 96 " %s") % (', '.join(sorted(missing)))
97 97 raise util.Abort(msg)
98 98
99 99 # there are two ways to push to remote repo:
100 100 #
101 101 # addchangegroup assumes local user can lock remote
102 102 # repo (local filesystem, old ssh servers).
103 103 #
104 104 # unbundle assumes local user cannot lock remote repo (new ssh
105 105 # servers, http servers).
106 106
107 107 if not pushop.remote.canpush():
108 108 raise util.Abort(_("destination does not support push"))
109 109 # get local lock as we might write phase data
110 110 locallock = None
111 111 try:
112 112 locallock = pushop.repo.lock()
113 113 pushop.locallocked = True
114 114 except IOError, err:
115 115 pushop.locallocked = False
116 116 if err.errno != errno.EACCES:
117 117 raise
118 118 # source repo cannot be locked.
119 119 # We do not abort the push, but just disable the local phase
120 120 # synchronisation.
121 121 msg = 'cannot lock source repository: %s\n' % err
122 122 pushop.ui.debug(msg)
123 123 try:
124 124 pushop.repo.checkpush(pushop)
125 125 lock = None
126 126 unbundle = pushop.remote.capable('unbundle')
127 127 if not unbundle:
128 128 lock = pushop.remote.lock()
129 129 try:
130 130 _pushdiscovery(pushop)
131 131 if _pushcheckoutgoing(pushop):
132 132 pushop.repo.prepushoutgoinghooks(pushop.repo,
133 133 pushop.remote,
134 134 pushop.outgoing)
135 135 if pushop.remote.capable('bundle2'):
136 136 _pushbundle2(pushop)
137 137 else:
138 138 _pushchangeset(pushop)
139 139 _pushcomputecommonheads(pushop)
140 140 _pushsyncphase(pushop)
141 141 _pushobsolete(pushop)
142 142 finally:
143 143 if lock is not None:
144 144 lock.release()
145 145 finally:
146 146 if locallock is not None:
147 147 locallock.release()
148 148
149 149 _pushbookmark(pushop)
150 150 return pushop.ret
151 151
152 152 def _pushdiscovery(pushop):
153 153 # discovery
154 154 unfi = pushop.repo.unfiltered()
155 155 fci = discovery.findcommonincoming
156 156 commoninc = fci(unfi, pushop.remote, force=pushop.force)
157 157 common, inc, remoteheads = commoninc
158 158 fco = discovery.findcommonoutgoing
159 159 outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
160 160 commoninc=commoninc, force=pushop.force)
161 161 pushop.outgoing = outgoing
162 162 pushop.remoteheads = remoteheads
163 163 pushop.incoming = inc
164 164
165 165 def _pushcheckoutgoing(pushop):
166 166 outgoing = pushop.outgoing
167 167 unfi = pushop.repo.unfiltered()
168 168 if not outgoing.missing:
169 169 # nothing to push
170 170 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
171 171 return False
172 172 # something to push
173 173 if not pushop.force:
174 174 # if repo.obsstore == False --> no obsolete
175 175 # then, save the iteration
176 176 if unfi.obsstore:
177 177 # these messages are here for 80 char limit reasons
178 178 mso = _("push includes obsolete changeset: %s!")
179 179 mst = "push includes %s changeset: %s!"
180 180 # plain versions for i18n tool to detect them
181 181 _("push includes unstable changeset: %s!")
182 182 _("push includes bumped changeset: %s!")
183 183 _("push includes divergent changeset: %s!")
184 184 # If we are about to push and there is at least one
185 185 # obsolete or unstable changeset in missing, at
186 186 # least one of the missing heads will be obsolete or
187 187 # unstable. So checking heads only is ok
188 188 for node in outgoing.missingheads:
189 189 ctx = unfi[node]
190 190 if ctx.obsolete():
191 191 raise util.Abort(mso % ctx)
192 192 elif ctx.troubled():
193 193 raise util.Abort(_(mst)
194 194 % (ctx.troubles()[0],
195 195 ctx))
196 196 newbm = pushop.ui.configlist('bookmarks', 'pushing')
197 197 discovery.checkheads(unfi, pushop.remote, outgoing,
198 198 pushop.remoteheads,
199 199 pushop.newbranch,
200 200 bool(pushop.incoming),
201 201 newbm)
202 202 return True
203 203
204 204 def _pushbundle2(pushop):
205 205 """push data to the remote using bundle2
206 206
207 207 The only currently supported type of data is changegroup but this will
208 208 evolve in the future."""
209 209 # Send known head to the server for race detection.
210 210 bundler = bundle2.bundle20(pushop.ui)
211 211 if not pushop.force:
212 212 part = bundle2.bundlepart('CHECK:HEADS', data=iter(pushop.remoteheads))
213 213 bundler.addpart(part)
214 214 # add the changegroup bundle
215 215 cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
216 216 cgpart = bundle2.bundlepart('CHANGEGROUP', data=cg.getchunks())
217 217 bundler.addpart(cgpart)
218 218 stream = util.chunkbuffer(bundler.getchunks())
219 219 sent = bundle2.unbundle20(pushop.repo.ui, stream)
220 220 reply = pushop.remote.unbundle(sent, ['force'], 'push')
221 221 try:
222 222 op = bundle2.processbundle(pushop.repo, reply)
223 223 except KeyError, exc:
224 224 raise util.Abort('missing support for %s' % exc)
225 225 cgreplies = op.records.getreplies(cgpart.id)
226 226 assert len(cgreplies['changegroup']) == 1
227 227 pushop.ret = cgreplies['changegroup'][0]['return']
228 228
229 229 def _pushchangeset(pushop):
230 230 """Make the actual push of changeset bundle to remote repo"""
231 231 outgoing = pushop.outgoing
232 232 unbundle = pushop.remote.capable('unbundle')
233 233 # TODO: get bundlecaps from remote
234 234 bundlecaps = None
235 235 # create a changegroup from local
236 236 if pushop.revs is None and not (outgoing.excluded
237 237 or pushop.repo.changelog.filteredrevs):
238 238 # push everything,
239 239 # use the fast path, no race possible on push
240 240 bundler = changegroup.bundle10(pushop.repo, bundlecaps)
241 241 cg = changegroup.getsubset(pushop.repo,
242 242 outgoing,
243 243 bundler,
244 244 'push',
245 245 fastpath=True)
246 246 else:
247 247 cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
248 248 bundlecaps)
249 249
250 250 # apply changegroup to remote
251 251 if unbundle:
252 252 # local repo finds heads on server, finds out what
253 253 # revs it must push. once revs transferred, if server
254 254 # finds it has different heads (someone else won
255 255 # commit/push race), server aborts.
256 256 if pushop.force:
257 257 remoteheads = ['force']
258 258 else:
259 259 remoteheads = pushop.remoteheads
260 260 # ssh: return remote's addchangegroup()
261 261 # http: return remote's addchangegroup() or 0 for error
262 262 pushop.ret = pushop.remote.unbundle(cg, remoteheads,
263 263 'push')
264 264 else:
265 265 # we return an integer indicating remote head count
266 266 # change
267 267 pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
268 268
269 269 def _pushcomputecommonheads(pushop):
270 270 unfi = pushop.repo.unfiltered()
271 271 if pushop.ret:
272 272 # push succeeded, synchronize target of the push
273 273 cheads = pushop.outgoing.missingheads
274 274 elif pushop.revs is None:
275 275 # All-out push failed. synchronize all common
276 276 cheads = pushop.outgoing.commonheads
277 277 else:
278 278 # I want cheads = heads(::missingheads and ::commonheads)
279 279 # (missingheads is revs with secret changeset filtered out)
280 280 #
281 281 # This can be expressed as:
282 282 # cheads = ( (missingheads and ::commonheads)
283 283 # + (commonheads and ::missingheads)
284 284 # )
285 285 #
286 286 # while trying to push we already computed the following:
287 287 # common = (::commonheads)
288 288 # missing = ((commonheads::missingheads) - commonheads)
289 289 #
290 290 # We can pick:
291 291 # * missingheads part of common (::commonheads)
292 292 common = set(pushop.outgoing.common)
293 293 nm = pushop.repo.changelog.nodemap
294 294 cheads = [node for node in pushop.revs if nm[node] in common]
295 295 # and
296 296 # * commonheads parents on missing
297 297 revset = unfi.set('%ln and parents(roots(%ln))',
298 298 pushop.outgoing.commonheads,
299 299 pushop.outgoing.missing)
300 300 cheads.extend(c.node() for c in revset)
301 301 pushop.commonheads = cheads
302 302
303 303 def _pushsyncphase(pushop):
304 304 """synchronise phase information locally and remotely"""
305 305 unfi = pushop.repo.unfiltered()
306 306 cheads = pushop.commonheads
307 307 if pushop.ret:
308 308 # push succeeded, synchronize target of the push
309 309 cheads = pushop.outgoing.missingheads
310 310 elif pushop.revs is None:
311 311 # All-out push failed. synchronize all common
312 312 cheads = pushop.outgoing.commonheads
313 313 else:
314 314 # I want cheads = heads(::missingheads and ::commonheads)
315 315 # (missingheads is revs with secret changeset filtered out)
316 316 #
317 317 # This can be expressed as:
318 318 # cheads = ( (missingheads and ::commonheads)
319 319 # + (commonheads and ::missingheads)
320 320 # )
321 321 #
322 322 # while trying to push we already computed the following:
323 323 # common = (::commonheads)
324 324 # missing = ((commonheads::missingheads) - commonheads)
325 325 #
326 326 # We can pick:
327 327 # * missingheads part of common (::commonheads)
328 328 common = set(pushop.outgoing.common)
329 329 nm = pushop.repo.changelog.nodemap
330 330 cheads = [node for node in pushop.revs if nm[node] in common]
331 331 # and
332 332 # * commonheads parents on missing
333 333 revset = unfi.set('%ln and parents(roots(%ln))',
334 334 pushop.outgoing.commonheads,
335 335 pushop.outgoing.missing)
336 336 cheads.extend(c.node() for c in revset)
337 337 pushop.commonheads = cheads
338 338 # even when we don't push, exchanging phase data is useful
339 339 remotephases = pushop.remote.listkeys('phases')
340 340 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
341 341 and remotephases # server supports phases
342 342 and pushop.ret is None # nothing was pushed
343 343 and remotephases.get('publishing', False)):
344 344 # When:
345 345 # - this is a subrepo push
346 346 # - and remote support phase
347 347 # - and no changeset was pushed
348 348 # - and remote is publishing
349 349 # We may be in issue 3871 case!
350 350 # We drop the possible phase synchronisation done by
351 351 # courtesy to publish changesets possibly locally draft
352 352 # on the remote.
353 353 remotephases = {'publishing': 'True'}
354 354 if not remotephases: # old server or public only reply from non-publishing
355 355 _localphasemove(pushop, cheads)
356 356 # don't push any phase data as there is nothing to push
357 357 else:
358 358 ana = phases.analyzeremotephases(pushop.repo, cheads,
359 359 remotephases)
360 360 pheads, droots = ana
361 361 ### Apply remote phase on local
362 362 if remotephases.get('publishing', False):
363 363 _localphasemove(pushop, cheads)
364 364 else: # publish = False
365 365 _localphasemove(pushop, pheads)
366 366 _localphasemove(pushop, cheads, phases.draft)
367 367 ### Apply local phase on remote
368 368
369 369 # Get the list of all revs that are draft on remote but public here.
370 370 # XXX Beware that the revset breaks if droots is not strictly
371 371 # XXX roots; we may want to ensure it is, but that is costly
372 372 outdated = unfi.set('heads((%ln::%ln) and public())',
373 373 droots, cheads)
374 374 for newremotehead in outdated:
375 375 r = pushop.remote.pushkey('phases',
376 376 newremotehead.hex(),
377 377 str(phases.draft),
378 378 str(phases.public))
379 379 if not r:
380 380 pushop.ui.warn(_('updating %s to public failed!\n')
381 381 % newremotehead)
382 382
383 383 def _localphasemove(pushop, nodes, phase=phases.public):
384 384 """move <nodes> to <phase> in the local source repo"""
385 385 if pushop.locallocked:
386 386 phases.advanceboundary(pushop.repo, phase, nodes)
387 387 else:
388 388 # repo is not locked, do not change any phases!
389 389 # Informs the user that phases should have been moved when
390 390 # applicable.
391 391 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
392 392 phasestr = phases.phasenames[phase]
393 393 if actualmoves:
394 394 pushop.ui.status(_('cannot lock source repo, skipping '
395 395 'local %s phase update\n') % phasestr)
396 396
397 397 def _pushobsolete(pushop):
398 398 """utility function to push obsolete markers to a remote"""
399 399 pushop.ui.debug('try to push obsolete markers to remote\n')
400 400 repo = pushop.repo
401 401 remote = pushop.remote
402 402 if (obsolete._enabled and repo.obsstore and
403 403 'obsolete' in remote.listkeys('namespaces')):
404 404 rslts = []
405 405 remotedata = repo.listkeys('obsolete')
406 406 for key in sorted(remotedata, reverse=True):
407 407 # reverse sort to ensure we end with dump0
408 408 data = remotedata[key]
409 409 rslts.append(remote.pushkey('obsolete', key, '', data))
410 410 if [r for r in rslts if not r]:
411 411 msg = _('failed to push some obsolete markers!\n')
412 412 repo.ui.warn(msg)
413 413
414 414 def _pushbookmark(pushop):
415 415 """Update bookmark position on remote"""
416 416 ui = pushop.ui
417 417 repo = pushop.repo.unfiltered()
418 418 remote = pushop.remote
419 419 ui.debug("checking for updated bookmarks\n")
420 420 revnums = map(repo.changelog.rev, pushop.revs or [])
421 421 ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
422 422 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
423 423 ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
424 424 srchex=hex)
425 425
426 426 for b, scid, dcid in advsrc:
427 427 if ancestors and repo[scid].rev() not in ancestors:
428 428 continue
429 429 if remote.pushkey('bookmarks', b, dcid, scid):
430 430 ui.status(_("updating bookmark %s\n") % b)
431 431 else:
432 432 ui.warn(_('updating bookmark %s failed!\n') % b)
433 433
434 434 class pulloperation(object):
435 435 """A object that represent a single pull operation
436 436
437 437 It purpose is to carry push related state and very common operation.
438 438
439 439 A new should be created at the beginning of each pull and discarded
440 440 afterward.
441 441 """
442 442
443 443 def __init__(self, repo, remote, heads=None, force=False):
444 444 # repo we pull into
445 445 self.repo = repo
446 446 # repo we pull from
447 447 self.remote = remote
448 448 # revision we try to pull (None is "all")
449 449 self.heads = heads
450 450 # do we force pull?
451 451 self.force = force
452 452 # the name of the pull transaction
453 453 self._trname = 'pull\n' + util.hidepassword(remote.url())
454 454 # hold the transaction once created
455 455 self._tr = None
456 456 # set of common changesets between local and remote before pull
457 457 self.common = None
458 458 # set of pulled heads
459 459 self.rheads = None
460 460 # list of missing changesets to fetch remotely
461 461 self.fetch = None
462 462 # result of changegroup pulling (used as return code by pull)
463 463 self.cgresult = None
464 464 # list of steps remaining to do (related to future bundle2 usage)
465 465 self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])
466 466
467 467 @util.propertycache
468 468 def pulledsubset(self):
469 469 """heads of the set of changeset target by the pull"""
470 470 # compute target subset
471 471 if self.heads is None:
472 472 # We pulled everything possible
473 473 # sync on everything common
474 474 c = set(self.common)
475 475 ret = list(self.common)
476 476 for n in self.rheads:
477 477 if n not in c:
478 478 ret.append(n)
479 479 return ret
480 480 else:
481 481 # We pulled a specific subset
482 482 # sync on this subset
483 483 return self.heads
484 484
485 485 def gettransaction(self):
486 486 """get appropriate pull transaction, creating it if needed"""
487 487 if self._tr is None:
488 488 self._tr = self.repo.transaction(self._trname)
489 489 return self._tr
490 490
491 491 def closetransaction(self):
492 492 """close transaction if created"""
493 493 if self._tr is not None:
494 494 self._tr.close()
495 495
496 496 def releasetransaction(self):
497 497 """release transaction if created"""
498 498 if self._tr is not None:
499 499 self._tr.release()
500 500
501 501 def pull(repo, remote, heads=None, force=False):
502 502 pullop = pulloperation(repo, remote, heads, force)
503 503 if pullop.remote.local():
504 504 missing = set(pullop.remote.requirements) - pullop.repo.supported
505 505 if missing:
506 506 msg = _("required features are not"
507 507 " supported in the destination:"
508 508 " %s") % (', '.join(sorted(missing)))
509 509 raise util.Abort(msg)
510 510
511 511 lock = pullop.repo.lock()
512 512 try:
513 513 _pulldiscovery(pullop)
514 514 if pullop.remote.capable('bundle2'):
515 515 _pullbundle2(pullop)
516 516 if 'changegroup' in pullop.todosteps:
517 517 _pullchangeset(pullop)
518 518 if 'phases' in pullop.todosteps:
519 519 _pullphase(pullop)
520 520 if 'obsmarkers' in pullop.todosteps:
521 521 _pullobsolete(pullop)
522 522 pullop.closetransaction()
523 523 finally:
524 524 pullop.releasetransaction()
525 525 lock.release()
526 526
527 527 return pullop.cgresult
528 528
529 529 def _pulldiscovery(pullop):
530 530 """discovery phase for the pull
531 531
532 532 Currently handles changeset discovery only; will change to handle all discovery
533 533 at some point."""
534 534 tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
535 535 pullop.remote,
536 536 heads=pullop.heads,
537 537 force=pullop.force)
538 538 pullop.common, pullop.fetch, pullop.rheads = tmp
539 539
540 540 def _pullbundle2(pullop):
541 541 """pull data using bundle2
542 542
543 543 For now, the only supported data is the changegroup."""
544 544 kwargs = {'bundlecaps': set(['HG20'])}
545 545 # pulling changegroup
546 546 pullop.todosteps.remove('changegroup')
547 547 if not pullop.fetch:
548 548 pullop.repo.ui.status(_("no changes found\n"))
549 549 pullop.cgresult = 0
550 550 else:
551 551 kwargs['common'] = pullop.common
552 552 kwargs['heads'] = pullop.heads or pullop.rheads
553 553 if pullop.heads is None and list(pullop.common) == [nullid]:
554 554 pullop.repo.ui.status(_("requesting all changes\n"))
555 555 if kwargs.keys() == ['format']:
556 556 return # nothing to pull
557 557 bundle = pullop.remote.getbundle('pull', **kwargs)
558 558 try:
559 559 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
560 560 except KeyError, exc:
561 561 raise util.Abort('missing support for %s' % exc)
562 562 assert len(op.records['changegroup']) == 1
563 563 pullop.cgresult = op.records['changegroup'][0]['return']
564 564
565 565 def _pullchangeset(pullop):
566 566 """pull changeset from unbundle into the local repo"""
567 567 # We delay opening the transaction as late as possible so we
568 568 # don't open a transaction for nothing and don't break future useful
569 569 # rollback calls
570 570 pullop.todosteps.remove('changegroup')
571 571 if not pullop.fetch:
572 572 pullop.repo.ui.status(_("no changes found\n"))
573 573 pullop.cgresult = 0
574 574 return
575 575 pullop.gettransaction()
576 576 if pullop.heads is None and list(pullop.common) == [nullid]:
577 577 pullop.repo.ui.status(_("requesting all changes\n"))
578 578 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
579 579 # issue1320, avoid a race if remote changed after discovery
580 580 pullop.heads = pullop.rheads
581 581
582 582 if pullop.remote.capable('getbundle'):
583 583 # TODO: get bundlecaps from remote
584 584 cg = pullop.remote.getbundle('pull', common=pullop.common,
585 585 heads=pullop.heads or pullop.rheads)
586 586 elif pullop.heads is None:
587 587 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
588 588 elif not pullop.remote.capable('changegroupsubset'):
589 589 raise util.Abort(_("partial pull cannot be done because "
590 590 "other repository doesn't support "
591 591 "changegroupsubset."))
592 592 else:
593 593 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
594 594 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
595 595 pullop.remote.url())
596 596
597 597 def _pullphase(pullop):
598 598 # Get remote phases data from remote
599 599 pullop.todosteps.remove('phases')
600 600 remotephases = pullop.remote.listkeys('phases')
601 601 publishing = bool(remotephases.get('publishing', False))
602 602 if remotephases and not publishing:
603 603 # remote is new and unpublishing
604 604 pheads, _dr = phases.analyzeremotephases(pullop.repo,
605 605 pullop.pulledsubset,
606 606 remotephases)
607 607 phases.advanceboundary(pullop.repo, phases.public, pheads)
608 608 phases.advanceboundary(pullop.repo, phases.draft,
609 609 pullop.pulledsubset)
610 610 else:
611 611 # Remote is old or publishing; all common changesets
612 612 # should be seen as public
613 613 phases.advanceboundary(pullop.repo, phases.public,
614 614 pullop.pulledsubset)
615 615
616 616 def _pullobsolete(pullop):
617 617 """utility function to pull obsolete markers from a remote
618 618
619 619 The `gettransaction` is a function that returns the pull transaction, creating
620 620 one if necessary. We return the transaction to inform the calling code that
621 621 a new transaction has been created (when applicable).
622 622
623 623 Exists mostly to allow overriding for experimentation purposes"""
624 624 pullop.todosteps.remove('obsmarkers')
625 625 tr = None
626 626 if obsolete._enabled:
627 627 pullop.repo.ui.debug('fetching remote obsolete markers\n')
628 628 remoteobs = pullop.remote.listkeys('obsolete')
629 629 if 'dump0' in remoteobs:
630 630 tr = pullop.gettransaction()
631 631 for key in sorted(remoteobs, reverse=True):
632 632 if key.startswith('dump'):
633 633 data = base85.b85decode(remoteobs[key])
634 634 pullop.repo.obsstore.mergemarkers(tr, data)
635 635 pullop.repo.invalidatevolatilesets()
636 636 return tr
637 637
638 638 def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
639 639 """return a full bundle (with potentially multiple kind of parts)
640 640
641 641 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
642 642 passed. For now, the bundle can contain only changegroup, but this will
643 643 changes when more part type will be available for bundle2.
644 644
645 645 This is different from changegroup.getbundle that only returns an HG10
646 646 changegroup bundle. They may eventually get reunited in the future when we
647 647 have a clearer idea of the API we what to query different data.
648 648
649 649 The implementation is at a very early stage and will get massive rework
650 650 when the API of bundle is refined.
651 651 """
652 652 # build bundle here.
653 653 cg = changegroup.getbundle(repo, source, heads=heads,
654 654 common=common, bundlecaps=bundlecaps)
655 655 if bundlecaps is None or 'HG20' not in bundlecaps:
656 656 return cg
657 657 # very crude first implementation,
658 658 # the bundle API will change and the generation will be done lazily.
659 659 bundler = bundle2.bundle20(repo.ui)
660 660 part = bundle2.bundlepart('changegroup', data=cg.getchunks())
661 661 bundler.addpart(part)
662 return bundle2.unbundle20(repo.ui, util.chunkbuffer(bundler.getchunks()))
662 return util.chunkbuffer(bundler.getchunks())
663 663
664 664 class PushRaced(RuntimeError):
665 665 """An exception raised during unbundling that indicate a push race"""
666 666
667 667 def check_heads(repo, their_heads, context):
668 668 """check if the heads of a repo have been modified
669 669
670 670 Used by peer for unbundling.
671 671 """
672 672 heads = repo.heads()
673 673 heads_hash = util.sha1(''.join(sorted(heads))).digest()
674 674 if not (their_heads == ['force'] or their_heads == heads or
675 675 their_heads == ['hashed', heads_hash]):
676 676 # someone else committed/pushed/unbundled while we
677 677 # were transferring data
678 678 raise PushRaced('repository changed while %s - '
679 679 'please try again' % context)
680 680
681 681 def unbundle(repo, cg, heads, source, url):
682 682 """Apply a bundle to a repo.
683 683
684 684 This function makes sure the repo is locked during the application and has a
685 685 mechanism to check that no push race occurred between the creation of the
686 686 bundle and its application.
687 687
688 688 If the push was raced, a PushRaced exception is raised."""
689 689 r = 0
690 690 # need a transaction when processing a bundle2 stream
691 691 tr = None
692 692 lock = repo.lock()
693 693 try:
694 694 check_heads(repo, heads, 'uploading changes')
695 695 # push can proceed
696 696 if util.safehasattr(cg, 'params'):
697 697 tr = repo.transaction('unbundle')
698 698 ret = bundle2.processbundle(repo, cg, lambda: tr)
699 699 tr.close()
700 700 stream = util.chunkbuffer(ret.reply.getchunks())
701 701 r = bundle2.unbundle20(repo.ui, stream)
702 702 else:
703 703 r = changegroup.addchangegroup(repo, cg, source, url)
704 704 finally:
705 705 if tr is not None:
706 706 tr.release()
707 707 lock.release()
708 708 return r
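For reference, the localrepo.py hunk below contains the matching consumer-side change. The producer side, at the end of exchange.getbundle above, now hands back the raw chunk stream instead of re-parsing it; a condensed sketch of that path, with names taken from the hunk above (the getbundle2stream wrapper name is hypothetical, and error handling plus the HG10 fallback are omitted):

# Condensed sketch of the bundle2 production path in exchange.getbundle,
# as shown in the hunk above; not a drop-in replacement.
import changegroup, bundle2, util

def getbundle2stream(repo, source, heads=None, common=None, bundlecaps=None):
    cg = changegroup.getbundle(repo, source, heads=heads,
                               common=common, bundlecaps=bundlecaps)
    bundler = bundle2.bundle20(repo.ui)
    bundler.addpart(bundle2.bundlepart('changegroup', data=cg.getchunks()))
    # Before this change the stream was immediately wrapped in
    # bundle2.unbundle20(); now the raw stream is returned to the caller.
    return util.chunkbuffer(bundler.getchunks())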
localrepo.py
@@ -1,1892 +1,1898
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock as lockmod
12 import transaction, store, encoding, exchange
12 import transaction, store, encoding, exchange, bundle2
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 import branchmap, pathutil
20 20 propertycache = util.propertycache
21 21 filecache = scmutil.filecache
22 22
23 23 class repofilecache(filecache):
24 24 """All filecache usage on repo are done for logic that should be unfiltered
25 25 """
26 26
27 27 def __get__(self, repo, type=None):
28 28 return super(repofilecache, self).__get__(repo.unfiltered(), type)
29 29 def __set__(self, repo, value):
30 30 return super(repofilecache, self).__set__(repo.unfiltered(), value)
31 31 def __delete__(self, repo):
32 32 return super(repofilecache, self).__delete__(repo.unfiltered())
33 33
34 34 class storecache(repofilecache):
35 35 """filecache for files in the store"""
36 36 def join(self, obj, fname):
37 37 return obj.sjoin(fname)
38 38
39 39 class unfilteredpropertycache(propertycache):
40 40 """propertycache that apply to unfiltered repo only"""
41 41
42 42 def __get__(self, repo, type=None):
43 43 unfi = repo.unfiltered()
44 44 if unfi is repo:
45 45 return super(unfilteredpropertycache, self).__get__(unfi)
46 46 return getattr(unfi, self.name)
47 47
48 48 class filteredpropertycache(propertycache):
49 49 """propertycache that must take filtering in account"""
50 50
51 51 def cachevalue(self, obj, value):
52 52 object.__setattr__(obj, self.name, value)
53 53
54 54
55 55 def hasunfilteredcache(repo, name):
56 56 """check if a repo has an unfilteredpropertycache value for <name>"""
57 57 return name in vars(repo.unfiltered())
58 58
59 59 def unfilteredmethod(orig):
60 60 """decorate method that always need to be run on unfiltered version"""
61 61 def wrapper(repo, *args, **kwargs):
62 62 return orig(repo.unfiltered(), *args, **kwargs)
63 63 return wrapper
64 64
65 65 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
66 66 'bundle2', 'unbundle'))
67 67 legacycaps = moderncaps.union(set(['changegroupsubset']))
68 68
69 69 class localpeer(peer.peerrepository):
70 70 '''peer for a local repo; reflects only the most recent API'''
71 71
72 72 def __init__(self, repo, caps=moderncaps):
73 73 peer.peerrepository.__init__(self)
74 74 self._repo = repo.filtered('served')
75 75 self.ui = repo.ui
76 76 self._caps = repo._restrictcapabilities(caps)
77 77 self.requirements = repo.requirements
78 78 self.supportedformats = repo.supportedformats
79 79
80 80 def close(self):
81 81 self._repo.close()
82 82
83 83 def _capabilities(self):
84 84 return self._caps
85 85
86 86 def local(self):
87 87 return self._repo
88 88
89 89 def canpush(self):
90 90 return True
91 91
92 92 def url(self):
93 93 return self._repo.url()
94 94
95 95 def lookup(self, key):
96 96 return self._repo.lookup(key)
97 97
98 98 def branchmap(self):
99 99 return self._repo.branchmap()
100 100
101 101 def heads(self):
102 102 return self._repo.heads()
103 103
104 104 def known(self, nodes):
105 105 return self._repo.known(nodes)
106 106
107 107 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
108 108 format='HG10'):
109 return exchange.getbundle(self._repo, source, heads=heads,
110 common=common, bundlecaps=bundlecaps)
109 cg = exchange.getbundle(self._repo, source, heads=heads,
110 common=common, bundlecaps=bundlecaps)
111 if bundlecaps is not None and 'HG20' in bundlecaps:
112 # When requesting a bundle2, getbundle returns a stream to make the
113 # wire level function happier. We need to build a proper object
114 # from it in local peer.
115 cg = bundle2.unbundle20(self.ui, cg)
116 return cg
111 117
112 118 # TODO We might want to move the next two calls into legacypeer and add
113 119 # unbundle instead.
114 120
115 121 def unbundle(self, cg, heads, url):
116 122 """apply a bundle on a repo
117 123
118 124 This function handles the repo locking itself."""
119 125 try:
120 126 return exchange.unbundle(self._repo, cg, heads, 'push', url)
121 127 except exchange.PushRaced, exc:
122 128 raise error.ResponseError(_('push failed:'), exc.message)
123 129
124 130 def lock(self):
125 131 return self._repo.lock()
126 132
127 133 def addchangegroup(self, cg, source, url):
128 134 return changegroup.addchangegroup(self._repo, cg, source, url)
129 135
130 136 def pushkey(self, namespace, key, old, new):
131 137 return self._repo.pushkey(namespace, key, old, new)
132 138
133 139 def listkeys(self, namespace):
134 140 return self._repo.listkeys(namespace)
135 141
136 142 def debugwireargs(self, one, two, three=None, four=None, five=None):
137 143 '''used to test argument passing over the wire'''
138 144 return "%s %s %s %s %s" % (one, two, three, four, five)
139 145
140 146 class locallegacypeer(localpeer):
141 147 '''peer extension which implements legacy methods too; used for tests with
142 148 restricted capabilities'''
143 149
144 150 def __init__(self, repo):
145 151 localpeer.__init__(self, repo, caps=legacycaps)
146 152
147 153 def branches(self, nodes):
148 154 return self._repo.branches(nodes)
149 155
150 156 def between(self, pairs):
151 157 return self._repo.between(pairs)
152 158
153 159 def changegroup(self, basenodes, source):
154 160 return changegroup.changegroup(self._repo, basenodes, source)
155 161
156 162 def changegroupsubset(self, bases, heads, source):
157 163 return changegroup.changegroupsubset(self._repo, bases, heads, source)
158 164
159 165 class localrepository(object):
160 166
161 167 supportedformats = set(('revlogv1', 'generaldelta'))
162 168 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
163 169 'dotencode'))
164 170 openerreqs = set(('revlogv1', 'generaldelta'))
165 171 requirements = ['revlogv1']
166 172 filtername = None
167 173
168 174 # a list of (ui, featureset) functions.
169 175 # only functions defined in module of enabled extensions are invoked
170 176 featuresetupfuncs = set()
171 177
172 178 def _baserequirements(self, create):
173 179 return self.requirements[:]
174 180
175 181 def __init__(self, baseui, path=None, create=False):
176 182 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
177 183 self.wopener = self.wvfs
178 184 self.root = self.wvfs.base
179 185 self.path = self.wvfs.join(".hg")
180 186 self.origroot = path
181 187 self.auditor = pathutil.pathauditor(self.root, self._checknested)
182 188 self.vfs = scmutil.vfs(self.path)
183 189 self.opener = self.vfs
184 190 self.baseui = baseui
185 191 self.ui = baseui.copy()
186 192 self.ui.copy = baseui.copy # prevent copying repo configuration
187 193 # A list of callbacks to shape the phase if no data were found.
188 194 # Callbacks are in the form: func(repo, roots) --> processed root.
189 195 # This list is to be filled by extensions during repo setup
190 196 self._phasedefaults = []
191 197 try:
192 198 self.ui.readconfig(self.join("hgrc"), self.root)
193 199 extensions.loadall(self.ui)
194 200 except IOError:
195 201 pass
196 202
197 203 if self.featuresetupfuncs:
198 204 self.supported = set(self._basesupported) # use private copy
199 205 extmods = set(m.__name__ for n, m
200 206 in extensions.extensions(self.ui))
201 207 for setupfunc in self.featuresetupfuncs:
202 208 if setupfunc.__module__ in extmods:
203 209 setupfunc(self.ui, self.supported)
204 210 else:
205 211 self.supported = self._basesupported
206 212
207 213 if not self.vfs.isdir():
208 214 if create:
209 215 if not self.wvfs.exists():
210 216 self.wvfs.makedirs()
211 217 self.vfs.makedir(notindexed=True)
212 218 requirements = self._baserequirements(create)
213 219 if self.ui.configbool('format', 'usestore', True):
214 220 self.vfs.mkdir("store")
215 221 requirements.append("store")
216 222 if self.ui.configbool('format', 'usefncache', True):
217 223 requirements.append("fncache")
218 224 if self.ui.configbool('format', 'dotencode', True):
219 225 requirements.append('dotencode')
220 226 # create an invalid changelog
221 227 self.vfs.append(
222 228 "00changelog.i",
223 229 '\0\0\0\2' # represents revlogv2
224 230 ' dummy changelog to prevent using the old repo layout'
225 231 )
226 232 if self.ui.configbool('format', 'generaldelta', False):
227 233 requirements.append("generaldelta")
228 234 requirements = set(requirements)
229 235 else:
230 236 raise error.RepoError(_("repository %s not found") % path)
231 237 elif create:
232 238 raise error.RepoError(_("repository %s already exists") % path)
233 239 else:
234 240 try:
235 241 requirements = scmutil.readrequires(self.vfs, self.supported)
236 242 except IOError, inst:
237 243 if inst.errno != errno.ENOENT:
238 244 raise
239 245 requirements = set()
240 246
241 247 self.sharedpath = self.path
242 248 try:
243 249 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
244 250 realpath=True)
245 251 s = vfs.base
246 252 if not vfs.exists():
247 253 raise error.RepoError(
248 254 _('.hg/sharedpath points to nonexistent directory %s') % s)
249 255 self.sharedpath = s
250 256 except IOError, inst:
251 257 if inst.errno != errno.ENOENT:
252 258 raise
253 259
254 260 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
255 261 self.spath = self.store.path
256 262 self.svfs = self.store.vfs
257 263 self.sopener = self.svfs
258 264 self.sjoin = self.store.join
259 265 self.vfs.createmode = self.store.createmode
260 266 self._applyrequirements(requirements)
261 267 if create:
262 268 self._writerequirements()
263 269
264 270
265 271 self._branchcaches = {}
266 272 self.filterpats = {}
267 273 self._datafilters = {}
268 274 self._transref = self._lockref = self._wlockref = None
269 275
270 276 # A cache for various files under .hg/ that tracks file changes,
271 277 # (used by the filecache decorator)
272 278 #
273 279 # Maps a property name to its util.filecacheentry
274 280 self._filecache = {}
275 281
276 282 # hold sets of revision to be filtered
277 283 # should be cleared when something might have changed the filter value:
278 284 # - new changesets,
279 285 # - phase change,
280 286 # - new obsolescence marker,
281 287 # - working directory parent change,
282 288 # - bookmark changes
283 289 self.filteredrevcache = {}
284 290
285 291 def close(self):
286 292 pass
287 293
288 294 def _restrictcapabilities(self, caps):
289 295 # bundle2 is not ready for prime time, drop it unless explicitly
290 296 # required by the tests (or some brave tester)
291 297 if not self.ui.configbool('server', 'bundle2', False):
292 298 caps = set(caps)
293 299 caps.discard('bundle2')
294 300 return caps
295 301
296 302 def _applyrequirements(self, requirements):
297 303 self.requirements = requirements
298 304 self.sopener.options = dict((r, 1) for r in requirements
299 305 if r in self.openerreqs)
300 306 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
301 307 if chunkcachesize is not None:
302 308 self.sopener.options['chunkcachesize'] = chunkcachesize
303 309
304 310 def _writerequirements(self):
305 311 reqfile = self.opener("requires", "w")
306 312 for r in sorted(self.requirements):
307 313 reqfile.write("%s\n" % r)
308 314 reqfile.close()
309 315
310 316 def _checknested(self, path):
311 317 """Determine if path is a legal nested repository."""
312 318 if not path.startswith(self.root):
313 319 return False
314 320 subpath = path[len(self.root) + 1:]
315 321 normsubpath = util.pconvert(subpath)
316 322
317 323 # XXX: Checking against the current working copy is wrong in
318 324 # the sense that it can reject things like
319 325 #
320 326 # $ hg cat -r 10 sub/x.txt
321 327 #
322 328 # if sub/ is no longer a subrepository in the working copy
323 329 # parent revision.
324 330 #
325 331 # However, it can of course also allow things that would have
326 332 # been rejected before, such as the above cat command if sub/
327 333 # is a subrepository now, but was a normal directory before.
328 334 # The old path auditor would have rejected by mistake since it
329 335 # panics when it sees sub/.hg/.
330 336 #
331 337 # All in all, checking against the working copy seems sensible
332 338 # since we want to prevent access to nested repositories on
333 339 # the filesystem *now*.
334 340 ctx = self[None]
335 341 parts = util.splitpath(subpath)
336 342 while parts:
337 343 prefix = '/'.join(parts)
338 344 if prefix in ctx.substate:
339 345 if prefix == normsubpath:
340 346 return True
341 347 else:
342 348 sub = ctx.sub(prefix)
343 349 return sub.checknested(subpath[len(prefix) + 1:])
344 350 else:
345 351 parts.pop()
346 352 return False
347 353
348 354 def peer(self):
349 355 return localpeer(self) # not cached to avoid reference cycle
350 356
351 357 def unfiltered(self):
352 358 """Return unfiltered version of the repository
353 359
354 360 Intended to be overwritten by filtered repo."""
355 361 return self
356 362
357 363 def filtered(self, name):
358 364 """Return a filtered version of a repository"""
359 365 # build a new class with the mixin and the current class
360 366 # (possibly subclass of the repo)
361 367 class proxycls(repoview.repoview, self.unfiltered().__class__):
362 368 pass
363 369 return proxycls(self, name)
364 370
365 371 @repofilecache('bookmarks')
366 372 def _bookmarks(self):
367 373 return bookmarks.bmstore(self)
368 374
369 375 @repofilecache('bookmarks.current')
370 376 def _bookmarkcurrent(self):
371 377 return bookmarks.readcurrent(self)
372 378
373 379 def bookmarkheads(self, bookmark):
374 380 name = bookmark.split('@', 1)[0]
375 381 heads = []
376 382 for mark, n in self._bookmarks.iteritems():
377 383 if mark.split('@', 1)[0] == name:
378 384 heads.append(n)
379 385 return heads
380 386
381 387 @storecache('phaseroots')
382 388 def _phasecache(self):
383 389 return phases.phasecache(self, self._phasedefaults)
384 390
385 391 @storecache('obsstore')
386 392 def obsstore(self):
387 393 store = obsolete.obsstore(self.sopener)
388 394 if store and not obsolete._enabled:
389 395 # message is rare enough to not be translated
390 396 msg = 'obsolete feature not enabled but %i markers found!\n'
391 397 self.ui.warn(msg % len(list(store)))
392 398 return store
393 399
394 400 @storecache('00changelog.i')
395 401 def changelog(self):
396 402 c = changelog.changelog(self.sopener)
397 403 if 'HG_PENDING' in os.environ:
398 404 p = os.environ['HG_PENDING']
399 405 if p.startswith(self.root):
400 406 c.readpending('00changelog.i.a')
401 407 return c
402 408
403 409 @storecache('00manifest.i')
404 410 def manifest(self):
405 411 return manifest.manifest(self.sopener)
406 412
407 413 @repofilecache('dirstate')
408 414 def dirstate(self):
409 415 warned = [0]
410 416 def validate(node):
411 417 try:
412 418 self.changelog.rev(node)
413 419 return node
414 420 except error.LookupError:
415 421 if not warned[0]:
416 422 warned[0] = True
417 423 self.ui.warn(_("warning: ignoring unknown"
418 424 " working parent %s!\n") % short(node))
419 425 return nullid
420 426
421 427 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
422 428
423 429 def __getitem__(self, changeid):
424 430 if changeid is None:
425 431 return context.workingctx(self)
426 432 return context.changectx(self, changeid)
427 433
428 434 def __contains__(self, changeid):
429 435 try:
430 436 return bool(self.lookup(changeid))
431 437 except error.RepoLookupError:
432 438 return False
433 439
434 440 def __nonzero__(self):
435 441 return True
436 442
437 443 def __len__(self):
438 444 return len(self.changelog)
439 445
440 446 def __iter__(self):
441 447 return iter(self.changelog)
442 448
443 449 def revs(self, expr, *args):
444 450 '''Return a list of revisions matching the given revset'''
445 451 expr = revset.formatspec(expr, *args)
446 452 m = revset.match(None, expr)
447 453 return m(self, revset.spanset(self))
448 454
449 455 def set(self, expr, *args):
450 456 '''
451 457 Yield a context for each matching revision, after doing arg
452 458 replacement via revset.formatspec
453 459 '''
454 460 for r in self.revs(expr, *args):
455 461 yield self[r]
456 462
457 463 def url(self):
458 464 return 'file:' + self.root
459 465
460 466 def hook(self, name, throw=False, **args):
461 467 return hook.hook(self.ui, self, name, throw, **args)
462 468
463 469 @unfilteredmethod
464 470 def _tag(self, names, node, message, local, user, date, extra={}):
465 471 if isinstance(names, str):
466 472 names = (names,)
467 473
468 474 branches = self.branchmap()
469 475 for name in names:
470 476 self.hook('pretag', throw=True, node=hex(node), tag=name,
471 477 local=local)
472 478 if name in branches:
473 479 self.ui.warn(_("warning: tag %s conflicts with existing"
474 480 " branch name\n") % name)
475 481
476 482 def writetags(fp, names, munge, prevtags):
477 483 fp.seek(0, 2)
478 484 if prevtags and prevtags[-1] != '\n':
479 485 fp.write('\n')
480 486 for name in names:
481 487 m = munge and munge(name) or name
482 488 if (self._tagscache.tagtypes and
483 489 name in self._tagscache.tagtypes):
484 490 old = self.tags().get(name, nullid)
485 491 fp.write('%s %s\n' % (hex(old), m))
486 492 fp.write('%s %s\n' % (hex(node), m))
487 493 fp.close()
488 494
489 495 prevtags = ''
490 496 if local:
491 497 try:
492 498 fp = self.opener('localtags', 'r+')
493 499 except IOError:
494 500 fp = self.opener('localtags', 'a')
495 501 else:
496 502 prevtags = fp.read()
497 503
498 504 # local tags are stored in the current charset
499 505 writetags(fp, names, None, prevtags)
500 506 for name in names:
501 507 self.hook('tag', node=hex(node), tag=name, local=local)
502 508 return
503 509
504 510 try:
505 511 fp = self.wfile('.hgtags', 'rb+')
506 512 except IOError, e:
507 513 if e.errno != errno.ENOENT:
508 514 raise
509 515 fp = self.wfile('.hgtags', 'ab')
510 516 else:
511 517 prevtags = fp.read()
512 518
513 519 # committed tags are stored in UTF-8
514 520 writetags(fp, names, encoding.fromlocal, prevtags)
515 521
516 522 fp.close()
517 523
518 524 self.invalidatecaches()
519 525
520 526 if '.hgtags' not in self.dirstate:
521 527 self[None].add(['.hgtags'])
522 528
523 529 m = matchmod.exact(self.root, '', ['.hgtags'])
524 530 tagnode = self.commit(message, user, date, extra=extra, match=m)
525 531
526 532 for name in names:
527 533 self.hook('tag', node=hex(node), tag=name, local=local)
528 534
529 535 return tagnode
530 536
531 537 def tag(self, names, node, message, local, user, date):
532 538 '''tag a revision with one or more symbolic names.
533 539
534 540 names is a list of strings or, when adding a single tag, names may be a
535 541 string.
536 542
537 543 if local is True, the tags are stored in a per-repository file.
538 544 otherwise, they are stored in the .hgtags file, and a new
539 545 changeset is committed with the change.
540 546
541 547 keyword arguments:
542 548
543 549 local: whether to store tags in non-version-controlled file
544 550 (default False)
545 551
546 552 message: commit message to use if committing
547 553
548 554 user: name of user to use if committing
549 555
550 556 date: date tuple to use if committing'''
551 557
552 558 if not local:
553 559 for x in self.status()[:5]:
554 560 if '.hgtags' in x:
555 561 raise util.Abort(_('working copy of .hgtags is changed '
556 562 '(please commit .hgtags manually)'))
557 563
558 564 self.tags() # instantiate the cache
559 565 self._tag(names, node, message, local, user, date)
560 566
561 567 @filteredpropertycache
562 568 def _tagscache(self):
563 569 '''Returns a tagscache object that contains various tags related
564 570 caches.'''
565 571
566 572 # This simplifies its cache management by having one decorated
567 573 # function (this one) and the rest simply fetch things from it.
568 574 class tagscache(object):
569 575 def __init__(self):
570 576 # These two define the set of tags for this repository. tags
571 577 # maps tag name to node; tagtypes maps tag name to 'global' or
572 578 # 'local'. (Global tags are defined by .hgtags across all
573 579 # heads, and local tags are defined in .hg/localtags.)
574 580 # They constitute the in-memory cache of tags.
575 581 self.tags = self.tagtypes = None
576 582
577 583 self.nodetagscache = self.tagslist = None
578 584
579 585 cache = tagscache()
580 586 cache.tags, cache.tagtypes = self._findtags()
581 587
582 588 return cache
583 589
584 590 def tags(self):
585 591 '''return a mapping of tag to node'''
586 592 t = {}
587 593 if self.changelog.filteredrevs:
588 594 tags, tt = self._findtags()
589 595 else:
590 596 tags = self._tagscache.tags
591 597 for k, v in tags.iteritems():
592 598 try:
593 599 # ignore tags to unknown nodes
594 600 self.changelog.rev(v)
595 601 t[k] = v
596 602 except (error.LookupError, ValueError):
597 603 pass
598 604 return t
599 605
600 606 def _findtags(self):
601 607 '''Do the hard work of finding tags. Return a pair of dicts
602 608 (tags, tagtypes) where tags maps tag name to node, and tagtypes
603 609 maps tag name to a string like \'global\' or \'local\'.
604 610 Subclasses or extensions are free to add their own tags, but
605 611 should be aware that the returned dicts will be retained for the
606 612 duration of the localrepo object.'''
607 613
608 614 # XXX what tagtype should subclasses/extensions use? Currently
609 615 # mq and bookmarks add tags, but do not set the tagtype at all.
610 616 # Should each extension invent its own tag type? Should there
611 617 # be one tagtype for all such "virtual" tags? Or is the status
612 618 # quo fine?
613 619
614 620 alltags = {} # map tag name to (node, hist)
615 621 tagtypes = {}
616 622
617 623 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
618 624 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
619 625
620 626 # Build the return dicts. Have to re-encode tag names because
621 627 # the tags module always uses UTF-8 (in order not to lose info
622 628 # writing to the cache), but the rest of Mercurial wants them in
623 629 # local encoding.
624 630 tags = {}
625 631 for (name, (node, hist)) in alltags.iteritems():
626 632 if node != nullid:
627 633 tags[encoding.tolocal(name)] = node
628 634 tags['tip'] = self.changelog.tip()
629 635 tagtypes = dict([(encoding.tolocal(name), value)
630 636 for (name, value) in tagtypes.iteritems()])
631 637 return (tags, tagtypes)
632 638
633 639 def tagtype(self, tagname):
634 640 '''
635 641 return the type of the given tag. result can be:
636 642
637 643 'local' : a local tag
638 644 'global' : a global tag
639 645 None : tag does not exist
640 646 '''
641 647
642 648 return self._tagscache.tagtypes.get(tagname)
643 649
644 650 def tagslist(self):
645 651 '''return a list of tags ordered by revision'''
646 652 if not self._tagscache.tagslist:
647 653 l = []
648 654 for t, n in self.tags().iteritems():
649 655 r = self.changelog.rev(n)
650 656 l.append((r, t, n))
651 657 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
652 658
653 659 return self._tagscache.tagslist
654 660
655 661 def nodetags(self, node):
656 662 '''return the tags associated with a node'''
657 663 if not self._tagscache.nodetagscache:
658 664 nodetagscache = {}
659 665 for t, n in self._tagscache.tags.iteritems():
660 666 nodetagscache.setdefault(n, []).append(t)
661 667 for tags in nodetagscache.itervalues():
662 668 tags.sort()
663 669 self._tagscache.nodetagscache = nodetagscache
664 670 return self._tagscache.nodetagscache.get(node, [])
665 671
666 672 def nodebookmarks(self, node):
667 673 marks = []
668 674 for bookmark, n in self._bookmarks.iteritems():
669 675 if n == node:
670 676 marks.append(bookmark)
671 677 return sorted(marks)
672 678
673 679 def branchmap(self):
674 680 '''returns a dictionary {branch: [branchheads]} with branchheads
675 681 ordered by increasing revision number'''
676 682 branchmap.updatecache(self)
677 683 return self._branchcaches[self.filtername]
678 684
679 685 def branchtip(self, branch):
680 686 '''return the tip node for a given branch'''
681 687 try:
682 688 return self.branchmap().branchtip(branch)
683 689 except KeyError:
684 690 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
685 691
686 692 def lookup(self, key):
687 693 return self[key].node()
688 694
689 695 def lookupbranch(self, key, remote=None):
690 696 repo = remote or self
691 697 if key in repo.branchmap():
692 698 return key
693 699
694 700 repo = (remote and remote.local()) and remote or self
695 701 return repo[key].branch()
696 702
697 703 def known(self, nodes):
698 704 nm = self.changelog.nodemap
699 705 pc = self._phasecache
700 706 result = []
701 707 for n in nodes:
702 708 r = nm.get(n)
703 709 resp = not (r is None or pc.phase(self, r) >= phases.secret)
704 710 result.append(resp)
705 711 return result
706 712
707 713 def local(self):
708 714 return self
709 715
710 716 def cancopy(self):
711 717 # so statichttprepo's override of local() works
712 718 if not self.local():
713 719 return False
714 720 if not self.ui.configbool('phases', 'publish', True):
715 721 return True
716 722 # if publishing we can't copy if there is filtered content
717 723 return not self.filtered('visible').changelog.filteredrevs
718 724
719 725 def join(self, f):
720 726 return os.path.join(self.path, f)
721 727
722 728 def wjoin(self, f):
723 729 return os.path.join(self.root, f)
724 730
725 731 def file(self, f):
726 732 if f[0] == '/':
727 733 f = f[1:]
728 734 return filelog.filelog(self.sopener, f)
729 735
730 736 def changectx(self, changeid):
731 737 return self[changeid]
732 738
733 739 def parents(self, changeid=None):
734 740 '''get list of changectxs for parents of changeid'''
735 741 return self[changeid].parents()
736 742
737 743 def setparents(self, p1, p2=nullid):
738 744 copies = self.dirstate.setparents(p1, p2)
739 745 pctx = self[p1]
740 746 if copies:
741 747 # Adjust copy records, the dirstate cannot do it, it
742 748 # requires access to parents manifests. Preserve them
743 749 # only for entries added to first parent.
744 750 for f in copies:
745 751 if f not in pctx and copies[f] in pctx:
746 752 self.dirstate.copy(copies[f], f)
747 753 if p2 == nullid:
748 754 for f, s in sorted(self.dirstate.copies().items()):
749 755 if f not in pctx and s not in pctx:
750 756 self.dirstate.copy(None, f)
751 757
752 758 def filectx(self, path, changeid=None, fileid=None):
753 759 """changeid can be a changeset revision, node, or tag.
754 760 fileid can be a file revision or node."""
755 761 return context.filectx(self, path, changeid, fileid)
756 762
757 763 def getcwd(self):
758 764 return self.dirstate.getcwd()
759 765
760 766 def pathto(self, f, cwd=None):
761 767 return self.dirstate.pathto(f, cwd)
762 768
763 769 def wfile(self, f, mode='r'):
764 770 return self.wopener(f, mode)
765 771
766 772 def _link(self, f):
767 773 return self.wvfs.islink(f)
768 774
769 775 def _loadfilter(self, filter):
770 776 if filter not in self.filterpats:
771 777 l = []
772 778 for pat, cmd in self.ui.configitems(filter):
773 779 if cmd == '!':
774 780 continue
775 781 mf = matchmod.match(self.root, '', [pat])
776 782 fn = None
777 783 params = cmd
778 784 for name, filterfn in self._datafilters.iteritems():
779 785 if cmd.startswith(name):
780 786 fn = filterfn
781 787 params = cmd[len(name):].lstrip()
782 788 break
783 789 if not fn:
784 790 fn = lambda s, c, **kwargs: util.filter(s, c)
785 791 # Wrap old filters not supporting keyword arguments
786 792 if not inspect.getargspec(fn)[2]:
787 793 oldfn = fn
788 794 fn = lambda s, c, **kwargs: oldfn(s, c)
789 795 l.append((mf, fn, params))
790 796 self.filterpats[filter] = l
791 797 return self.filterpats[filter]
792 798
793 799 def _filter(self, filterpats, filename, data):
794 800 for mf, fn, cmd in filterpats:
795 801 if mf(filename):
796 802 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
797 803 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
798 804 break
799 805
800 806 return data
801 807
802 808 @unfilteredpropertycache
803 809 def _encodefilterpats(self):
804 810 return self._loadfilter('encode')
805 811
806 812 @unfilteredpropertycache
807 813 def _decodefilterpats(self):
808 814 return self._loadfilter('decode')
809 815
810 816 def adddatafilter(self, name, filter):
811 817 self._datafilters[name] = filter
812 818
813 819 def wread(self, filename):
814 820 if self._link(filename):
815 821 data = self.wvfs.readlink(filename)
816 822 else:
817 823 data = self.wopener.read(filename)
818 824 return self._filter(self._encodefilterpats, filename, data)
819 825
820 826 def wwrite(self, filename, data, flags):
821 827 data = self._filter(self._decodefilterpats, filename, data)
822 828 if 'l' in flags:
823 829 self.wopener.symlink(data, filename)
824 830 else:
825 831 self.wopener.write(filename, data)
826 832 if 'x' in flags:
827 833 self.wvfs.setflags(filename, False, True)
828 834
829 835 def wwritedata(self, filename, data):
830 836 return self._filter(self._decodefilterpats, filename, data)
831 837
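The encode/decode machinery above is driven by patterns from the ui configuration and by data filters registered through adddatafilter(); wread() runs the encode filters and wwrite() runs the decode filters. A minimal sketch of registering a data filter, assuming repo is an open localrepository and treating the filter name 'strip-cr:' and the function stripcr() as hypothetical:

    def stripcr(s, cmd, ui=None, repo=None, filename=None):
        # 's' is the raw file data, 'cmd' the configured command string;
        # return the transformed data (here: drop carriage returns)
        return s.replace('\r\n', '\n')

    repo.adddatafilter('strip-cr:', stripcr)
    # a matching pattern configured in the encode/decode sections will now
    # dispatch to stripcr() instead of spawning an external command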
832 838 def transaction(self, desc, report=None):
833 839 tr = self._transref and self._transref() or None
834 840 if tr and tr.running():
835 841 return tr.nest()
836 842
837 843 # abort here if the journal already exists
838 844 if self.svfs.exists("journal"):
839 845 raise error.RepoError(
840 846 _("abandoned transaction found - run hg recover"))
841 847
842 848 def onclose():
843 849 self.store.write(tr)
844 850
845 851 self._writejournal(desc)
846 852 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
847 853 rp = report and report or self.ui.warn
848 854 tr = transaction.transaction(rp, self.sopener,
849 855 "journal",
850 856 aftertrans(renames),
851 857 self.store.createmode,
852 858 onclose)
853 859 self._transref = weakref.ref(tr)
854 860 return tr
855 861
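commitctx() and stream_in() further down show how a transaction obtained from transaction() is meant to be used: close it on success and release it unconditionally. A minimal sketch of that pattern, assuming repo is an open localrepository:

    tr = repo.transaction('my-operation')  # description recorded in journal.desc
    try:
        # ... write store data through the transaction ...
        tr.close()    # commit the journal on success
    finally:
        tr.release()  # rolls back if close() was never reached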
856 862 def _journalfiles(self):
857 863 return ((self.svfs, 'journal'),
858 864 (self.vfs, 'journal.dirstate'),
859 865 (self.vfs, 'journal.branch'),
860 866 (self.vfs, 'journal.desc'),
861 867 (self.vfs, 'journal.bookmarks'),
862 868 (self.svfs, 'journal.phaseroots'))
863 869
864 870 def undofiles(self):
865 871 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
866 872
867 873 def _writejournal(self, desc):
868 874 self.opener.write("journal.dirstate",
869 875 self.opener.tryread("dirstate"))
870 876 self.opener.write("journal.branch",
871 877 encoding.fromlocal(self.dirstate.branch()))
872 878 self.opener.write("journal.desc",
873 879 "%d\n%s\n" % (len(self), desc))
874 880 self.opener.write("journal.bookmarks",
875 881 self.opener.tryread("bookmarks"))
876 882 self.sopener.write("journal.phaseroots",
877 883 self.sopener.tryread("phaseroots"))
878 884
879 885 def recover(self):
880 886 lock = self.lock()
881 887 try:
882 888 if self.svfs.exists("journal"):
883 889 self.ui.status(_("rolling back interrupted transaction\n"))
884 890 transaction.rollback(self.sopener, "journal",
885 891 self.ui.warn)
886 892 self.invalidate()
887 893 return True
888 894 else:
889 895 self.ui.warn(_("no interrupted transaction available\n"))
890 896 return False
891 897 finally:
892 898 lock.release()
893 899
894 900 def rollback(self, dryrun=False, force=False):
895 901 wlock = lock = None
896 902 try:
897 903 wlock = self.wlock()
898 904 lock = self.lock()
899 905 if self.svfs.exists("undo"):
900 906 return self._rollback(dryrun, force)
901 907 else:
902 908 self.ui.warn(_("no rollback information available\n"))
903 909 return 1
904 910 finally:
905 911 release(lock, wlock)
906 912
907 913 @unfilteredmethod # Until we get smarter cache management
908 914 def _rollback(self, dryrun, force):
909 915 ui = self.ui
910 916 try:
911 917 args = self.opener.read('undo.desc').splitlines()
912 918 (oldlen, desc, detail) = (int(args[0]), args[1], None)
913 919 if len(args) >= 3:
914 920 detail = args[2]
915 921 oldtip = oldlen - 1
916 922
917 923 if detail and ui.verbose:
918 924 msg = (_('repository tip rolled back to revision %s'
919 925 ' (undo %s: %s)\n')
920 926 % (oldtip, desc, detail))
921 927 else:
922 928 msg = (_('repository tip rolled back to revision %s'
923 929 ' (undo %s)\n')
924 930 % (oldtip, desc))
925 931 except IOError:
926 932 msg = _('rolling back unknown transaction\n')
927 933 desc = None
928 934
929 935 if not force and self['.'] != self['tip'] and desc == 'commit':
930 936 raise util.Abort(
931 937 _('rollback of last commit while not checked out '
932 938 'may lose data'), hint=_('use -f to force'))
933 939
934 940 ui.status(msg)
935 941 if dryrun:
936 942 return 0
937 943
938 944 parents = self.dirstate.parents()
939 945 self.destroying()
940 946 transaction.rollback(self.sopener, 'undo', ui.warn)
941 947 if self.vfs.exists('undo.bookmarks'):
942 948 self.vfs.rename('undo.bookmarks', 'bookmarks')
943 949 if self.svfs.exists('undo.phaseroots'):
944 950 self.svfs.rename('undo.phaseroots', 'phaseroots')
945 951 self.invalidate()
946 952
947 953 parentgone = (parents[0] not in self.changelog.nodemap or
948 954 parents[1] not in self.changelog.nodemap)
949 955 if parentgone:
950 956 self.vfs.rename('undo.dirstate', 'dirstate')
951 957 try:
952 958 branch = self.opener.read('undo.branch')
953 959 self.dirstate.setbranch(encoding.tolocal(branch))
954 960 except IOError:
955 961 ui.warn(_('named branch could not be reset: '
956 962 'current branch is still \'%s\'\n')
957 963 % self.dirstate.branch())
958 964
959 965 self.dirstate.invalidate()
960 966 parents = tuple([p.rev() for p in self.parents()])
961 967 if len(parents) > 1:
962 968 ui.status(_('working directory now based on '
963 969 'revisions %d and %d\n') % parents)
964 970 else:
965 971 ui.status(_('working directory now based on '
966 972 'revision %d\n') % parents)
967 973 # TODO: if we know which new heads may result from this rollback, pass
968 974 # them to destroy(), which will prevent the branchhead cache from being
969 975 # invalidated.
970 976 self.destroyed()
971 977 return 0
972 978
973 979 def invalidatecaches(self):
974 980
975 981 if '_tagscache' in vars(self):
976 982 # can't use delattr on proxy
977 983 del self.__dict__['_tagscache']
978 984
979 985 self.unfiltered()._branchcaches.clear()
980 986 self.invalidatevolatilesets()
981 987
982 988 def invalidatevolatilesets(self):
983 989 self.filteredrevcache.clear()
984 990 obsolete.clearobscaches(self)
985 991
986 992 def invalidatedirstate(self):
987 993 '''Invalidates the dirstate, causing the next call to dirstate
988 994 to check if it was modified since the last time it was read,
989 995 rereading it if it has.
990 996
991 997 This is different from dirstate.invalidate() in that it doesn't
992 998 always reread the dirstate. Use dirstate.invalidate() if you want to
993 999 explicitly read the dirstate again (i.e. restoring it to a previous
994 1000 known good state).'''
995 1001 if hasunfilteredcache(self, 'dirstate'):
996 1002 for k in self.dirstate._filecache:
997 1003 try:
998 1004 delattr(self.dirstate, k)
999 1005 except AttributeError:
1000 1006 pass
1001 1007 delattr(self.unfiltered(), 'dirstate')
1002 1008
1003 1009 def invalidate(self):
1004 1010 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1005 1011 for k in self._filecache:
1006 1012 # dirstate is invalidated separately in invalidatedirstate()
1007 1013 if k == 'dirstate':
1008 1014 continue
1009 1015
1010 1016 try:
1011 1017 delattr(unfiltered, k)
1012 1018 except AttributeError:
1013 1019 pass
1014 1020 self.invalidatecaches()
1015 1021 self.store.invalidatecaches()
1016 1022
1017 1023 def invalidateall(self):
1018 1024 '''Fully invalidates both store and non-store parts, causing the
1019 1025 subsequent operation to reread any outside changes.'''
1020 1026 # extension should hook this to invalidate its caches
1021 1027 self.invalidate()
1022 1028 self.invalidatedirstate()
1023 1029
1024 1030 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1025 1031 try:
1026 1032 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1027 1033 except error.LockHeld, inst:
1028 1034 if not wait:
1029 1035 raise
1030 1036 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1031 1037 (desc, inst.locker))
1032 1038 # default to 600 seconds timeout
1033 1039 l = lockmod.lock(vfs, lockname,
1034 1040 int(self.ui.config("ui", "timeout", "600")),
1035 1041 releasefn, desc=desc)
1036 1042 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1037 1043 if acquirefn:
1038 1044 acquirefn()
1039 1045 return l
1040 1046
1041 1047 def _afterlock(self, callback):
1042 1048 """add a callback to the current repository lock.
1043 1049
1044 1050 The callback will be executed on lock release."""
1045 1051 l = self._lockref and self._lockref()
1046 1052 if l:
1047 1053 l.postrelease.append(callback)
1048 1054 else:
1049 1055 callback()
1050 1056
1051 1057 def lock(self, wait=True):
1052 1058 '''Lock the repository store (.hg/store) and return a weak reference
1053 1059 to the lock. Use this before modifying the store (e.g. committing or
1054 1060 stripping). If you are opening a transaction, get a lock as well.'''
1055 1061 l = self._lockref and self._lockref()
1056 1062 if l is not None and l.held:
1057 1063 l.lock()
1058 1064 return l
1059 1065
1060 1066 def unlock():
1061 1067 if hasunfilteredcache(self, '_phasecache'):
1062 1068 self._phasecache.write()
1063 1069 for k, ce in self._filecache.items():
1064 1070 if k == 'dirstate' or k not in self.__dict__:
1065 1071 continue
1066 1072 ce.refresh()
1067 1073
1068 1074 l = self._lock(self.svfs, "lock", wait, unlock,
1069 1075 self.invalidate, _('repository %s') % self.origroot)
1070 1076 self._lockref = weakref.ref(l)
1071 1077 return l
1072 1078
1073 1079 def wlock(self, wait=True):
1074 1080 '''Lock the non-store parts of the repository (everything under
1075 1081 .hg except .hg/store) and return a weak reference to the lock.
1076 1082 Use this before modifying files in .hg.'''
1077 1083 l = self._wlockref and self._wlockref()
1078 1084 if l is not None and l.held:
1079 1085 l.lock()
1080 1086 return l
1081 1087
1082 1088 def unlock():
1083 1089 self.dirstate.write()
1084 1090 self._filecache['dirstate'].refresh()
1085 1091
1086 1092 l = self._lock(self.vfs, "wlock", wait, unlock,
1087 1093 self.invalidatedirstate, _('working directory of %s') %
1088 1094 self.origroot)
1089 1095 self._wlockref = weakref.ref(l)
1090 1096 return l
1091 1097
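rollback() and commit() above illustrate the expected lock ordering: acquire wlock() before lock() and release them in the reverse order. A sketch of the same pattern, assuming repo as before:

    wlock = lock = None
    try:
        wlock = repo.wlock()  # protects .hg outside the store (dirstate, bookmarks, ...)
        lock = repo.lock()    # protects .hg/store; take it before committing or stripping
        # ... modify the repository ...
    finally:
        if lock:
            lock.release()
        if wlock:
            wlock.release()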
1092 1098 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1093 1099 """
1094 1100 commit an individual file as part of a larger transaction
1095 1101 """
1096 1102
1097 1103 fname = fctx.path()
1098 1104 text = fctx.data()
1099 1105 flog = self.file(fname)
1100 1106 fparent1 = manifest1.get(fname, nullid)
1101 1107 fparent2 = fparent2o = manifest2.get(fname, nullid)
1102 1108
1103 1109 meta = {}
1104 1110 copy = fctx.renamed()
1105 1111 if copy and copy[0] != fname:
1106 1112 # Mark the new revision of this file as a copy of another
1107 1113 # file. This copy data will effectively act as a parent
1108 1114 # of this new revision. If this is a merge, the first
1109 1115 # parent will be the nullid (meaning "look up the copy data")
1110 1116 # and the second one will be the other parent. For example:
1111 1117 #
1112 1118 # 0 --- 1 --- 3 rev1 changes file foo
1113 1119 # \ / rev2 renames foo to bar and changes it
1114 1120 # \- 2 -/ rev3 should have bar with all changes and
1115 1121 # should record that bar descends from
1116 1122 # bar in rev2 and foo in rev1
1117 1123 #
1118 1124 # this allows this merge to succeed:
1119 1125 #
1120 1126 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1121 1127 # \ / merging rev3 and rev4 should use bar@rev2
1122 1128 # \- 2 --- 4 as the merge base
1123 1129 #
1124 1130
1125 1131 cfname = copy[0]
1126 1132 crev = manifest1.get(cfname)
1127 1133 newfparent = fparent2
1128 1134
1129 1135 if manifest2: # branch merge
1130 1136 if fparent2 == nullid or crev is None: # copied on remote side
1131 1137 if cfname in manifest2:
1132 1138 crev = manifest2[cfname]
1133 1139 newfparent = fparent1
1134 1140
1135 1141 # find source in nearest ancestor if we've lost track
1136 1142 if not crev:
1137 1143 self.ui.debug(" %s: searching for copy revision for %s\n" %
1138 1144 (fname, cfname))
1139 1145 for ancestor in self[None].ancestors():
1140 1146 if cfname in ancestor:
1141 1147 crev = ancestor[cfname].filenode()
1142 1148 break
1143 1149
1144 1150 if crev:
1145 1151 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1146 1152 meta["copy"] = cfname
1147 1153 meta["copyrev"] = hex(crev)
1148 1154 fparent1, fparent2 = nullid, newfparent
1149 1155 else:
1150 1156 self.ui.warn(_("warning: can't find ancestor for '%s' "
1151 1157 "copied from '%s'!\n") % (fname, cfname))
1152 1158
1153 1159 elif fparent1 == nullid:
1154 1160 fparent1, fparent2 = fparent2, nullid
1155 1161 elif fparent2 != nullid:
1156 1162 # is one parent an ancestor of the other?
1157 1163 fparentancestors = flog.commonancestors(fparent1, fparent2)
1158 1164 if fparent1 in fparentancestors:
1159 1165 fparent1, fparent2 = fparent2, nullid
1160 1166 elif fparent2 in fparentancestors:
1161 1167 fparent2 = nullid
1162 1168
1163 1169 # is the file changed?
1164 1170 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1165 1171 changelist.append(fname)
1166 1172 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1167 1173
1168 1174 # are just the flags changed during merge?
1169 1175 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1170 1176 changelist.append(fname)
1171 1177
1172 1178 return fparent1
1173 1179
1174 1180 @unfilteredmethod
1175 1181 def commit(self, text="", user=None, date=None, match=None, force=False,
1176 1182 editor=False, extra={}):
1177 1183 """Add a new revision to current repository.
1178 1184
1179 1185 Revision information is gathered from the working directory,
1180 1186 match can be used to filter the committed files. If editor is
1181 1187 supplied, it is called to get a commit message.
1182 1188 """
1183 1189
1184 1190 def fail(f, msg):
1185 1191 raise util.Abort('%s: %s' % (f, msg))
1186 1192
1187 1193 if not match:
1188 1194 match = matchmod.always(self.root, '')
1189 1195
1190 1196 if not force:
1191 1197 vdirs = []
1192 1198 match.explicitdir = vdirs.append
1193 1199 match.bad = fail
1194 1200
1195 1201 wlock = self.wlock()
1196 1202 try:
1197 1203 wctx = self[None]
1198 1204 merge = len(wctx.parents()) > 1
1199 1205
1200 1206 if (not force and merge and match and
1201 1207 (match.files() or match.anypats())):
1202 1208 raise util.Abort(_('cannot partially commit a merge '
1203 1209 '(do not specify files or patterns)'))
1204 1210
1205 1211 changes = self.status(match=match, clean=force)
1206 1212 if force:
1207 1213 changes[0].extend(changes[6]) # mq may commit unchanged files
1208 1214
1209 1215 # check subrepos
1210 1216 subs = []
1211 1217 commitsubs = set()
1212 1218 newstate = wctx.substate.copy()
1213 1219 # only manage subrepos and .hgsubstate if .hgsub is present
1214 1220 if '.hgsub' in wctx:
1215 1221 # we'll decide whether to track this ourselves, thanks
1216 1222 for c in changes[:3]:
1217 1223 if '.hgsubstate' in c:
1218 1224 c.remove('.hgsubstate')
1219 1225
1220 1226 # compare current state to last committed state
1221 1227 # build new substate based on last committed state
1222 1228 oldstate = wctx.p1().substate
1223 1229 for s in sorted(newstate.keys()):
1224 1230 if not match(s):
1225 1231 # ignore working copy, use old state if present
1226 1232 if s in oldstate:
1227 1233 newstate[s] = oldstate[s]
1228 1234 continue
1229 1235 if not force:
1230 1236 raise util.Abort(
1231 1237 _("commit with new subrepo %s excluded") % s)
1232 1238 if wctx.sub(s).dirty(True):
1233 1239 if not self.ui.configbool('ui', 'commitsubrepos'):
1234 1240 raise util.Abort(
1235 1241 _("uncommitted changes in subrepo %s") % s,
1236 1242 hint=_("use --subrepos for recursive commit"))
1237 1243 subs.append(s)
1238 1244 commitsubs.add(s)
1239 1245 else:
1240 1246 bs = wctx.sub(s).basestate()
1241 1247 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1242 1248 if oldstate.get(s, (None, None, None))[1] != bs:
1243 1249 subs.append(s)
1244 1250
1245 1251 # check for removed subrepos
1246 1252 for p in wctx.parents():
1247 1253 r = [s for s in p.substate if s not in newstate]
1248 1254 subs += [s for s in r if match(s)]
1249 1255 if subs:
1250 1256 if (not match('.hgsub') and
1251 1257 '.hgsub' in (wctx.modified() + wctx.added())):
1252 1258 raise util.Abort(
1253 1259 _("can't commit subrepos without .hgsub"))
1254 1260 changes[0].insert(0, '.hgsubstate')
1255 1261
1256 1262 elif '.hgsub' in changes[2]:
1257 1263 # clean up .hgsubstate when .hgsub is removed
1258 1264 if ('.hgsubstate' in wctx and
1259 1265 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1260 1266 changes[2].insert(0, '.hgsubstate')
1261 1267
1262 1268 # make sure all explicit patterns are matched
1263 1269 if not force and match.files():
1264 1270 matched = set(changes[0] + changes[1] + changes[2])
1265 1271
1266 1272 for f in match.files():
1267 1273 f = self.dirstate.normalize(f)
1268 1274 if f == '.' or f in matched or f in wctx.substate:
1269 1275 continue
1270 1276 if f in changes[3]: # missing
1271 1277 fail(f, _('file not found!'))
1272 1278 if f in vdirs: # visited directory
1273 1279 d = f + '/'
1274 1280 for mf in matched:
1275 1281 if mf.startswith(d):
1276 1282 break
1277 1283 else:
1278 1284 fail(f, _("no match under directory!"))
1279 1285 elif f not in self.dirstate:
1280 1286 fail(f, _("file not tracked!"))
1281 1287
1282 1288 cctx = context.workingctx(self, text, user, date, extra, changes)
1283 1289
1284 1290 if (not force and not extra.get("close") and not merge
1285 1291 and not cctx.files()
1286 1292 and wctx.branch() == wctx.p1().branch()):
1287 1293 return None
1288 1294
1289 1295 if merge and cctx.deleted():
1290 1296 raise util.Abort(_("cannot commit merge with missing files"))
1291 1297
1292 1298 ms = mergemod.mergestate(self)
1293 1299 for f in changes[0]:
1294 1300 if f in ms and ms[f] == 'u':
1295 1301 raise util.Abort(_("unresolved merge conflicts "
1296 1302 "(see hg help resolve)"))
1297 1303
1298 1304 if editor:
1299 1305 cctx._text = editor(self, cctx, subs)
1300 1306 edited = (text != cctx._text)
1301 1307
1302 1308 # Save commit message in case this transaction gets rolled back
1303 1309 # (e.g. by a pretxncommit hook). Leave the content alone on
1304 1310 # the assumption that the user will use the same editor again.
1305 1311 msgfn = self.savecommitmessage(cctx._text)
1306 1312
1307 1313 # commit subs and write new state
1308 1314 if subs:
1309 1315 for s in sorted(commitsubs):
1310 1316 sub = wctx.sub(s)
1311 1317 self.ui.status(_('committing subrepository %s\n') %
1312 1318 subrepo.subrelpath(sub))
1313 1319 sr = sub.commit(cctx._text, user, date)
1314 1320 newstate[s] = (newstate[s][0], sr)
1315 1321 subrepo.writestate(self, newstate)
1316 1322
1317 1323 p1, p2 = self.dirstate.parents()
1318 1324 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1319 1325 try:
1320 1326 self.hook("precommit", throw=True, parent1=hookp1,
1321 1327 parent2=hookp2)
1322 1328 ret = self.commitctx(cctx, True)
1323 1329 except: # re-raises
1324 1330 if edited:
1325 1331 self.ui.write(
1326 1332 _('note: commit message saved in %s\n') % msgfn)
1327 1333 raise
1328 1334
1329 1335 # update bookmarks, dirstate and mergestate
1330 1336 bookmarks.update(self, [p1, p2], ret)
1331 1337 cctx.markcommitted(ret)
1332 1338 ms.reset()
1333 1339 finally:
1334 1340 wlock.release()
1335 1341
1336 1342 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1337 1343 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1338 1344 self._afterlock(commithook)
1339 1345 return ret
1340 1346
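A minimal invocation of commit(), assuming repo has pending working-directory changes; the message and user below are placeholders:

    node = repo.commit(text='example: placeholder commit message',
                       user='Jane Doe <jane@example.com>')
    if node is None:
        repo.ui.status('nothing changed\n')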
1341 1347 @unfilteredmethod
1342 1348 def commitctx(self, ctx, error=False):
1343 1349 """Add a new revision to current repository.
1344 1350 Revision information is passed via the context argument.
1345 1351 """
1346 1352
1347 1353 tr = lock = None
1348 1354 removed = list(ctx.removed())
1349 1355 p1, p2 = ctx.p1(), ctx.p2()
1350 1356 user = ctx.user()
1351 1357
1352 1358 lock = self.lock()
1353 1359 try:
1354 1360 tr = self.transaction("commit")
1355 1361 trp = weakref.proxy(tr)
1356 1362
1357 1363 if ctx.files():
1358 1364 m1 = p1.manifest().copy()
1359 1365 m2 = p2.manifest()
1360 1366
1361 1367 # check in files
1362 1368 new = {}
1363 1369 changed = []
1364 1370 linkrev = len(self)
1365 1371 for f in sorted(ctx.modified() + ctx.added()):
1366 1372 self.ui.note(f + "\n")
1367 1373 try:
1368 1374 fctx = ctx[f]
1369 1375 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1370 1376 changed)
1371 1377 m1.set(f, fctx.flags())
1372 1378 except OSError, inst:
1373 1379 self.ui.warn(_("trouble committing %s!\n") % f)
1374 1380 raise
1375 1381 except IOError, inst:
1376 1382 errcode = getattr(inst, 'errno', errno.ENOENT)
1377 1383 if error or errcode and errcode != errno.ENOENT:
1378 1384 self.ui.warn(_("trouble committing %s!\n") % f)
1379 1385 raise
1380 1386 else:
1381 1387 removed.append(f)
1382 1388
1383 1389 # update manifest
1384 1390 m1.update(new)
1385 1391 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1386 1392 drop = [f for f in removed if f in m1]
1387 1393 for f in drop:
1388 1394 del m1[f]
1389 1395 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1390 1396 p2.manifestnode(), (new, drop))
1391 1397 files = changed + removed
1392 1398 else:
1393 1399 mn = p1.manifestnode()
1394 1400 files = []
1395 1401
1396 1402 # update changelog
1397 1403 self.changelog.delayupdate()
1398 1404 n = self.changelog.add(mn, files, ctx.description(),
1399 1405 trp, p1.node(), p2.node(),
1400 1406 user, ctx.date(), ctx.extra().copy())
1401 1407 p = lambda: self.changelog.writepending() and self.root or ""
1402 1408 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1403 1409 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1404 1410 parent2=xp2, pending=p)
1405 1411 self.changelog.finalize(trp)
1406 1412 # set the new commit in its proper phase
1407 1413 targetphase = subrepo.newcommitphase(self.ui, ctx)
1408 1414 if targetphase:
1409 1415 # retracting the boundary does not alter parent changesets.
1410 1416 # if a parent has a higher phase, the resulting phase will
1411 1417 # be compliant anyway
1412 1418 #
1413 1419 # if the minimal phase was 0, we don't need to retract anything
1414 1420 phases.retractboundary(self, targetphase, [n])
1415 1421 tr.close()
1416 1422 branchmap.updatecache(self.filtered('served'))
1417 1423 return n
1418 1424 finally:
1419 1425 if tr:
1420 1426 tr.release()
1421 1427 lock.release()
1422 1428
1423 1429 @unfilteredmethod
1424 1430 def destroying(self):
1425 1431 '''Inform the repository that nodes are about to be destroyed.
1426 1432 Intended for use by strip and rollback, so there's a common
1427 1433 place for anything that has to be done before destroying history.
1428 1434
1429 1435 This is mostly useful for saving state that is in memory and waiting
1430 1436 to be flushed when the current lock is released. Because a call to
1431 1437 destroyed is imminent, the repo will be invalidated causing those
1432 1438 changes to stay in memory (waiting for the next unlock), or vanish
1433 1439 completely.
1434 1440 '''
1435 1441 # When using the same lock to commit and strip, the phasecache is left
1436 1442 # dirty after committing. Then when we strip, the repo is invalidated,
1437 1443 # causing those changes to disappear.
1438 1444 if '_phasecache' in vars(self):
1439 1445 self._phasecache.write()
1440 1446
1441 1447 @unfilteredmethod
1442 1448 def destroyed(self):
1443 1449 '''Inform the repository that nodes have been destroyed.
1444 1450 Intended for use by strip and rollback, so there's a common
1445 1451 place for anything that has to be done after destroying history.
1446 1452 '''
1447 1453 # When one tries to:
1448 1454 # 1) destroy nodes thus calling this method (e.g. strip)
1449 1455 # 2) use phasecache somewhere (e.g. commit)
1450 1456 #
1451 1457 # then 2) will fail because the phasecache contains nodes that were
1452 1458 # removed. We can either remove phasecache from the filecache,
1453 1459 # causing it to reload next time it is accessed, or simply filter
1454 1460 # the removed nodes now and write the updated cache.
1455 1461 self._phasecache.filterunknown(self)
1456 1462 self._phasecache.write()
1457 1463
1458 1464 # update the 'served' branch cache to help read-only server processes
1459 1465 # Thanks to branchcache collaboration this is done from the nearest
1460 1466 # filtered subset and it is expected to be fast.
1461 1467 branchmap.updatecache(self.filtered('served'))
1462 1468
1463 1469 # Ensure the persistent tag cache is updated. Doing it now
1464 1470 # means that the tag cache only has to worry about destroyed
1465 1471 # heads immediately after a strip/rollback. That in turn
1466 1472 # guarantees that "cachetip == currenttip" (comparing both rev
1467 1473 # and node) always means no nodes have been added or destroyed.
1468 1474
1469 1475 # XXX this is suboptimal when qrefresh'ing: we strip the current
1470 1476 # head, refresh the tag cache, then immediately add a new head.
1471 1477 # But I think doing it this way is necessary for the "instant
1472 1478 # tag cache retrieval" case to work.
1473 1479 self.invalidate()
1474 1480
1475 1481 def walk(self, match, node=None):
1476 1482 '''
1477 1483 walk recursively through the directory tree or a given
1478 1484 changeset, finding all files matched by the match
1479 1485 function
1480 1486 '''
1481 1487 return self[node].walk(match)
1482 1488
1483 1489 def status(self, node1='.', node2=None, match=None,
1484 1490 ignored=False, clean=False, unknown=False,
1485 1491 listsubrepos=False):
1486 1492 """return status of files between two nodes or node and working
1487 1493 directory.
1488 1494
1489 1495 If node1 is None, use the first dirstate parent instead.
1490 1496 If node2 is None, compare node1 with working directory.
1491 1497 """
1492 1498
1493 1499 def mfmatches(ctx):
1494 1500 mf = ctx.manifest().copy()
1495 1501 if match.always():
1496 1502 return mf
1497 1503 for fn in mf.keys():
1498 1504 if not match(fn):
1499 1505 del mf[fn]
1500 1506 return mf
1501 1507
1502 1508 ctx1 = self[node1]
1503 1509 ctx2 = self[node2]
1504 1510
1505 1511 working = ctx2.rev() is None
1506 1512 parentworking = working and ctx1 == self['.']
1507 1513 match = match or matchmod.always(self.root, self.getcwd())
1508 1514 listignored, listclean, listunknown = ignored, clean, unknown
1509 1515
1510 1516 # load earliest manifest first for caching reasons
1511 1517 if not working and ctx2.rev() < ctx1.rev():
1512 1518 ctx2.manifest()
1513 1519
1514 1520 if not parentworking:
1515 1521 def bad(f, msg):
1516 1522 # 'f' may be a directory pattern from 'match.files()',
1517 1523 # so 'f not in ctx1' is not enough
1518 1524 if f not in ctx1 and f not in ctx1.dirs():
1519 1525 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1520 1526 match.bad = bad
1521 1527
1522 1528 if working: # we need to scan the working dir
1523 1529 subrepos = []
1524 1530 if '.hgsub' in self.dirstate:
1525 1531 subrepos = sorted(ctx2.substate)
1526 1532 s = self.dirstate.status(match, subrepos, listignored,
1527 1533 listclean, listunknown)
1528 1534 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1529 1535
1530 1536 # check for any possibly clean files
1531 1537 if parentworking and cmp:
1532 1538 fixup = []
1533 1539 # do a full compare of any files that might have changed
1534 1540 for f in sorted(cmp):
1535 1541 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1536 1542 or ctx1[f].cmp(ctx2[f])):
1537 1543 modified.append(f)
1538 1544 else:
1539 1545 fixup.append(f)
1540 1546
1541 1547 # update dirstate for files that are actually clean
1542 1548 if fixup:
1543 1549 if listclean:
1544 1550 clean += fixup
1545 1551
1546 1552 try:
1547 1553 # updating the dirstate is optional
1548 1554 # so we don't wait on the lock
1549 1555 wlock = self.wlock(False)
1550 1556 try:
1551 1557 for f in fixup:
1552 1558 self.dirstate.normal(f)
1553 1559 finally:
1554 1560 wlock.release()
1555 1561 except error.LockError:
1556 1562 pass
1557 1563
1558 1564 if not parentworking:
1559 1565 mf1 = mfmatches(ctx1)
1560 1566 if working:
1561 1567 # we are comparing working dir against non-parent
1562 1568 # generate a pseudo-manifest for the working dir
1563 1569 mf2 = mfmatches(self['.'])
1564 1570 for f in cmp + modified + added:
1565 1571 mf2[f] = None
1566 1572 mf2.set(f, ctx2.flags(f))
1567 1573 for f in removed:
1568 1574 if f in mf2:
1569 1575 del mf2[f]
1570 1576 else:
1571 1577 # we are comparing two revisions
1572 1578 deleted, unknown, ignored = [], [], []
1573 1579 mf2 = mfmatches(ctx2)
1574 1580
1575 1581 modified, added, clean = [], [], []
1576 1582 withflags = mf1.withflags() | mf2.withflags()
1577 1583 for fn, mf2node in mf2.iteritems():
1578 1584 if fn in mf1:
1579 1585 if (fn not in deleted and
1580 1586 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1581 1587 (mf1[fn] != mf2node and
1582 1588 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1583 1589 modified.append(fn)
1584 1590 elif listclean:
1585 1591 clean.append(fn)
1586 1592 del mf1[fn]
1587 1593 elif fn not in deleted:
1588 1594 added.append(fn)
1589 1595 removed = mf1.keys()
1590 1596
1591 1597 if working and modified and not self.dirstate._checklink:
1592 1598 # Symlink placeholders may get non-symlink-like contents
1593 1599 # via user error or dereferencing by NFS or Samba servers,
1594 1600 # so we filter out any placeholders that don't look like a
1595 1601 # symlink
1596 1602 sane = []
1597 1603 for f in modified:
1598 1604 if ctx2.flags(f) == 'l':
1599 1605 d = ctx2[f].data()
1600 1606 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1601 1607 self.ui.debug('ignoring suspect symlink placeholder'
1602 1608 ' "%s"\n' % f)
1603 1609 continue
1604 1610 sane.append(f)
1605 1611 modified = sane
1606 1612
1607 1613 r = modified, added, removed, deleted, unknown, ignored, clean
1608 1614
1609 1615 if listsubrepos:
1610 1616 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1611 1617 if working:
1612 1618 rev2 = None
1613 1619 else:
1614 1620 rev2 = ctx2.substate[subpath][1]
1615 1621 try:
1616 1622 submatch = matchmod.narrowmatcher(subpath, match)
1617 1623 s = sub.status(rev2, match=submatch, ignored=listignored,
1618 1624 clean=listclean, unknown=listunknown,
1619 1625 listsubrepos=True)
1620 1626 for rfiles, sfiles in zip(r, s):
1621 1627 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1622 1628 except error.LookupError:
1623 1629 self.ui.status(_("skipping missing subrepository: %s\n")
1624 1630 % subpath)
1625 1631
1626 1632 for l in r:
1627 1633 l.sort()
1628 1634 return r
1629 1635
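status() returns a seven-element tuple in the order assembled just before it returns; a small sketch of consuming it, assuming repo as before:

    modified, added, removed, deleted, unknown, ignored, clean = repo.status(
        node1='.', node2=None, unknown=True, ignored=True, clean=True)
    for f in modified:
        repo.ui.write('M %s\n' % f)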
1630 1636 def heads(self, start=None):
1631 1637 heads = self.changelog.heads(start)
1632 1638 # sort the output in rev descending order
1633 1639 return sorted(heads, key=self.changelog.rev, reverse=True)
1634 1640
1635 1641 def branchheads(self, branch=None, start=None, closed=False):
1636 1642 '''return a (possibly filtered) list of heads for the given branch
1637 1643
1638 1644 Heads are returned in topological order, from newest to oldest.
1639 1645 If branch is None, use the dirstate branch.
1640 1646 If start is not None, return only heads reachable from start.
1641 1647 If closed is True, return heads that are marked as closed as well.
1642 1648 '''
1643 1649 if branch is None:
1644 1650 branch = self[None].branch()
1645 1651 branches = self.branchmap()
1646 1652 if branch not in branches:
1647 1653 return []
1648 1654 # the cache returns heads ordered lowest to highest
1649 1655 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1650 1656 if start is not None:
1651 1657 # filter out the heads that cannot be reached from startrev
1652 1658 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1653 1659 bheads = [h for h in bheads if h in fbheads]
1654 1660 return bheads
1655 1661
1656 1662 def branches(self, nodes):
1657 1663 if not nodes:
1658 1664 nodes = [self.changelog.tip()]
1659 1665 b = []
1660 1666 for n in nodes:
1661 1667 t = n
1662 1668 while True:
1663 1669 p = self.changelog.parents(n)
1664 1670 if p[1] != nullid or p[0] == nullid:
1665 1671 b.append((t, n, p[0], p[1]))
1666 1672 break
1667 1673 n = p[0]
1668 1674 return b
1669 1675
1670 1676 def between(self, pairs):
1671 1677 r = []
1672 1678
1673 1679 for top, bottom in pairs:
1674 1680 n, l, i = top, [], 0
1675 1681 f = 1
1676 1682
1677 1683 while n != bottom and n != nullid:
1678 1684 p = self.changelog.parents(n)[0]
1679 1685 if i == f:
1680 1686 l.append(n)
1681 1687 f = f * 2
1682 1688 n = p
1683 1689 i += 1
1684 1690
1685 1691 r.append(l)
1686 1692
1687 1693 return r
1688 1694
1689 1695 def pull(self, remote, heads=None, force=False):
1690 1696 return exchange.pull(self, remote, heads, force)
1691 1697
1692 1698 def checkpush(self, pushop):
1693 1699 """Extensions can override this function if additional checks have
1694 1700 to be performed before pushing, or call it if they override the push
1695 1701 command.
1696 1702 """
1697 1703 pass
1698 1704
1699 1705 @unfilteredpropertycache
1700 1706 def prepushoutgoinghooks(self):
1701 1707 """Return util.hooks consists of "(repo, remote, outgoing)"
1702 1708 functions, which are called before pushing changesets.
1703 1709 """
1704 1710 return util.hooks()
1705 1711
1706 1712 def push(self, remote, force=False, revs=None, newbranch=False):
1707 1713 return exchange.push(self, remote, force, revs, newbranch)
1708 1714
1709 1715 def stream_in(self, remote, requirements):
1710 1716 lock = self.lock()
1711 1717 try:
1712 1718 # Save remote branchmap. We will use it later
1713 1719 # to speed up branchcache creation
1714 1720 rbranchmap = None
1715 1721 if remote.capable("branchmap"):
1716 1722 rbranchmap = remote.branchmap()
1717 1723
1718 1724 fp = remote.stream_out()
1719 1725 l = fp.readline()
1720 1726 try:
1721 1727 resp = int(l)
1722 1728 except ValueError:
1723 1729 raise error.ResponseError(
1724 1730 _('unexpected response from remote server:'), l)
1725 1731 if resp == 1:
1726 1732 raise util.Abort(_('operation forbidden by server'))
1727 1733 elif resp == 2:
1728 1734 raise util.Abort(_('locking the remote repository failed'))
1729 1735 elif resp != 0:
1730 1736 raise util.Abort(_('the server sent an unknown error code'))
1731 1737 self.ui.status(_('streaming all changes\n'))
1732 1738 l = fp.readline()
1733 1739 try:
1734 1740 total_files, total_bytes = map(int, l.split(' ', 1))
1735 1741 except (ValueError, TypeError):
1736 1742 raise error.ResponseError(
1737 1743 _('unexpected response from remote server:'), l)
1738 1744 self.ui.status(_('%d files to transfer, %s of data\n') %
1739 1745 (total_files, util.bytecount(total_bytes)))
1740 1746 handled_bytes = 0
1741 1747 self.ui.progress(_('clone'), 0, total=total_bytes)
1742 1748 start = time.time()
1743 1749
1744 1750 tr = self.transaction(_('clone'))
1745 1751 try:
1746 1752 for i in xrange(total_files):
1747 1753 # XXX doesn't support '\n' or '\r' in filenames
1748 1754 l = fp.readline()
1749 1755 try:
1750 1756 name, size = l.split('\0', 1)
1751 1757 size = int(size)
1752 1758 except (ValueError, TypeError):
1753 1759 raise error.ResponseError(
1754 1760 _('unexpected response from remote server:'), l)
1755 1761 if self.ui.debugflag:
1756 1762 self.ui.debug('adding %s (%s)\n' %
1757 1763 (name, util.bytecount(size)))
1758 1764 # for backwards compat, name was partially encoded
1759 1765 ofp = self.sopener(store.decodedir(name), 'w')
1760 1766 for chunk in util.filechunkiter(fp, limit=size):
1761 1767 handled_bytes += len(chunk)
1762 1768 self.ui.progress(_('clone'), handled_bytes,
1763 1769 total=total_bytes)
1764 1770 ofp.write(chunk)
1765 1771 ofp.close()
1766 1772 tr.close()
1767 1773 finally:
1768 1774 tr.release()
1769 1775
1770 1776 # Writing straight to files circumvented the in-memory caches
1771 1777 self.invalidate()
1772 1778
1773 1779 elapsed = time.time() - start
1774 1780 if elapsed <= 0:
1775 1781 elapsed = 0.001
1776 1782 self.ui.progress(_('clone'), None)
1777 1783 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1778 1784 (util.bytecount(total_bytes), elapsed,
1779 1785 util.bytecount(total_bytes / elapsed)))
1780 1786
1781 1787 # new requirements = old non-format requirements +
1782 1788 # new format-related
1783 1789 # requirements from the streamed-in repository
1784 1790 requirements.update(set(self.requirements) - self.supportedformats)
1785 1791 self._applyrequirements(requirements)
1786 1792 self._writerequirements()
1787 1793
1788 1794 if rbranchmap:
1789 1795 rbheads = []
1790 1796 for bheads in rbranchmap.itervalues():
1791 1797 rbheads.extend(bheads)
1792 1798
1793 1799 if rbheads:
1794 1800 rtiprev = max((int(self.changelog.rev(node))
1795 1801 for node in rbheads))
1796 1802 cache = branchmap.branchcache(rbranchmap,
1797 1803 self[rtiprev].node(),
1798 1804 rtiprev)
1799 1805 # Try to stick it as low as possible
1800 1806 # filters above 'served' are unlikely to be fetched from a clone
1801 1807 for candidate in ('base', 'immutable', 'served'):
1802 1808 rview = self.filtered(candidate)
1803 1809 if cache.validfor(rview):
1804 1810 self._branchcaches[candidate] = cache
1805 1811 cache.write(rview)
1806 1812 break
1807 1813 self.invalidate()
1808 1814 return len(self.heads()) + 1
1809 1815 finally:
1810 1816 lock.release()
1811 1817
1812 1818 def clone(self, remote, heads=[], stream=False):
1813 1819 '''clone remote repository.
1814 1820
1815 1821 keyword arguments:
1816 1822 heads: list of revs to clone (forces use of pull)
1817 1823 stream: use streaming clone if possible'''
1818 1824
1819 1825 # now, all clients that can request uncompressed clones can
1820 1826 # read repo formats supported by all servers that can serve
1821 1827 # them.
1822 1828
1823 1829 # if revlog format changes, client will have to check version
1824 1830 # and format flags on "stream" capability, and use
1825 1831 # uncompressed only if compatible.
1826 1832
1827 1833 if not stream:
1828 1834 # if the server explicitly prefers to stream (for fast LANs)
1829 1835 stream = remote.capable('stream-preferred')
1830 1836
1831 1837 if stream and not heads:
1832 1838 # 'stream' means remote revlog format is revlogv1 only
1833 1839 if remote.capable('stream'):
1834 1840 return self.stream_in(remote, set(('revlogv1',)))
1835 1841 # otherwise, 'streamreqs' contains the remote revlog format
1836 1842 streamreqs = remote.capable('streamreqs')
1837 1843 if streamreqs:
1838 1844 streamreqs = set(streamreqs.split(','))
1839 1845 # if we support it, stream in and adjust our requirements
1840 1846 if not streamreqs - self.supportedformats:
1841 1847 return self.stream_in(remote, streamreqs)
1842 1848 return self.pull(remote, heads)
1843 1849
1844 1850 def pushkey(self, namespace, key, old, new):
1845 1851 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1846 1852 old=old, new=new)
1847 1853 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1848 1854 ret = pushkey.push(self, namespace, key, old, new)
1849 1855 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1850 1856 ret=ret)
1851 1857 return ret
1852 1858
1853 1859 def listkeys(self, namespace):
1854 1860 self.hook('prelistkeys', throw=True, namespace=namespace)
1855 1861 self.ui.debug('listing keys for "%s"\n' % namespace)
1856 1862 values = pushkey.list(self, namespace)
1857 1863 self.hook('listkeys', namespace=namespace, values=values)
1858 1864 return values
1859 1865
1860 1866 def debugwireargs(self, one, two, three=None, four=None, five=None):
1861 1867 '''used to test argument passing over the wire'''
1862 1868 return "%s %s %s %s %s" % (one, two, three, four, five)
1863 1869
1864 1870 def savecommitmessage(self, text):
1865 1871 fp = self.opener('last-message.txt', 'wb')
1866 1872 try:
1867 1873 fp.write(text)
1868 1874 finally:
1869 1875 fp.close()
1870 1876 return self.pathto(fp.name[len(self.root) + 1:])
1871 1877
1872 1878 # used to avoid circular references so destructors work
1873 1879 def aftertrans(files):
1874 1880 renamefiles = [tuple(t) for t in files]
1875 1881 def a():
1876 1882 for vfs, src, dest in renamefiles:
1877 1883 try:
1878 1884 vfs.rename(src, dest)
1879 1885 except OSError: # journal file does not yet exist
1880 1886 pass
1881 1887 return a
1882 1888
1883 1889 def undoname(fn):
1884 1890 base, name = os.path.split(fn)
1885 1891 assert name.startswith('journal')
1886 1892 return os.path.join(base, name.replace('journal', 'undo', 1))
1887 1893
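undoname() maps a journal file name to its undo counterpart, which is how undofiles() derives its list from _journalfiles(); for example:

    undoname('.hg/store/journal.phaseroots')  # -> '.hg/store/undo.phaseroots'
    undoname('journal.dirstate')              # -> 'undo.dirstate'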
1888 1894 def instance(ui, path, create):
1889 1895 return localrepository(ui, util.urllocalpath(path), create)
1890 1896
1891 1897 def islocal(path):
1892 1898 return True