getbundle: support of listkeys argument when bundle2 is used...
Pierre-Yves David
r21657:0ff44e06 default
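
This change teaches the wire-protocol getbundle to honor a new listkeys argument when bundle2 is in use: for each requested pushkey namespace, the server emits a b2x:listkeys part whose payload is produced by pushkey.encodekeys, so a client can fetch phases or bookmarks in the same round-trip as the changegroup. As a hedged sketch (not part of the diff below), the pushkey wire encoding at this time is one tab-separated key/value pair per line:

    def encodekeys(keys):
        # mirrors pushkey.encodekeys: one 'key<TAB>value' pair per line
        return '\n'.join(['%s\t%s' % (k, v) for k, v in keys])

    # encodekeys([('publishing', 'True')]) == 'publishing\tTrue'
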
@@ -1,740 +1,746 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks, bundle2
12 import discovery, phases, obsolete, bookmarks, bundle2, pushkey
13 13
14 14 def readbundle(ui, fh, fname, vfs=None):
15 15 header = changegroup.readexactly(fh, 4)
16 16
17 17 alg = None
18 18 if not fname:
19 19 fname = "stream"
20 20 if not header.startswith('HG') and header.startswith('\0'):
21 21 fh = changegroup.headerlessfixup(fh, header)
22 22 header = "HG10"
23 23 alg = 'UN'
24 24 elif vfs:
25 25 fname = vfs.join(fname)
26 26
27 27 magic, version = header[0:2], header[2:4]
28 28
29 29 if magic != 'HG':
30 30 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
31 31 if version == '10':
32 32 if alg is None:
33 33 alg = changegroup.readexactly(fh, 2)
34 34 return changegroup.unbundle10(fh, alg)
35 35 elif version == '2X':
36 36 return bundle2.unbundle20(ui, fh, header=magic + version)
37 37 else:
38 38 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39 39
40 40
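For orientation, readbundle above dispatches on a fixed-size header: two magic bytes, a two-byte version, and, for HG10 bundles, a two-byte compression code. A minimal standalone sketch of that dispatch (illustrative names, not Mercurial API):

    def sniffbundle(header):
        # header: the first six bytes of the stream
        magic, version = header[0:2], header[2:4]
        if magic != 'HG':
            raise ValueError('not a Mercurial bundle')
        if version == '10':
            return 'bundle10', header[4:6]  # compression: 'UN', 'GZ' or 'BZ'
        if version == '2X':
            return 'bundle2', None          # experimental bundle2 format
        raise ValueError('unknown bundle version %s' % version)

    # sniffbundle('HG10UN') -> ('bundle10', 'UN')
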
41 41 class pushoperation(object):
42 42 """A object that represent a single push operation
43 43
44 44 It purpose is to carry push related state and very common operation.
45 45
46 46 A new should be created at the beginning of each push and discarded
47 47 afterward.
48 48 """
49 49
50 50 def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
51 51 # repo we push from
52 52 self.repo = repo
53 53 self.ui = repo.ui
54 54 # repo we push to
55 55 self.remote = remote
56 56 # force option provided
57 57 self.force = force
58 58 # revs to be pushed (None is "all")
59 59 self.revs = revs
60 60 # allow push of new branch
61 61 self.newbranch = newbranch
62 62 # did a local lock get acquired?
63 63 self.locallocked = None
64 64 # Integer version of the push result
65 65 # - None means nothing to push
66 66 # - 0 means HTTP error
67 67 # - 1 means we pushed and remote head count is unchanged *or*
68 68 # we have outgoing changesets but refused to push
69 69 # - other values as described by addchangegroup()
70 70 self.ret = None
71 71 # discover.outgoing object (contains common and outgoing data)
72 72 self.outgoing = None
73 73 # all remote heads before the push
74 74 self.remoteheads = None
75 75 # testable as a boolean indicating if any nodes are missing locally.
76 76 self.incoming = None
77 77 # set of all heads common after changeset bundle push
78 78 self.commonheads = None
79 79
80 80 def push(repo, remote, force=False, revs=None, newbranch=False):
81 81 '''Push outgoing changesets (limited by revs) from a local
82 82 repository to remote. Return an integer:
83 83 - None means nothing to push
84 84 - 0 means HTTP error
85 85 - 1 means we pushed and remote head count is unchanged *or*
86 86 we have outgoing changesets but refused to push
87 87 - other values as described by addchangegroup()
88 88 '''
89 89 pushop = pushoperation(repo, remote, force, revs, newbranch)
90 90 if pushop.remote.local():
91 91 missing = (set(pushop.repo.requirements)
92 92 - pushop.remote.local().supported)
93 93 if missing:
94 94 msg = _("required features are not"
95 95 " supported in the destination:"
96 96 " %s") % (', '.join(sorted(missing)))
97 97 raise util.Abort(msg)
98 98
99 99 # there are two ways to push to remote repo:
100 100 #
101 101 # addchangegroup assumes local user can lock remote
102 102 # repo (local filesystem, old ssh servers).
103 103 #
104 104 # unbundle assumes local user cannot lock remote repo (new ssh
105 105 # servers, http servers).
106 106
107 107 if not pushop.remote.canpush():
108 108 raise util.Abort(_("destination does not support push"))
109 109 # get local lock as we might write phase data
110 110 locallock = None
111 111 try:
112 112 locallock = pushop.repo.lock()
113 113 pushop.locallocked = True
114 114 except IOError, err:
115 115 pushop.locallocked = False
116 116 if err.errno != errno.EACCES:
117 117 raise
118 118 # source repo cannot be locked.
119 119 # We do not abort the push, but just disable the local phase
120 120 # synchronisation.
121 121 msg = 'cannot lock source repository: %s\n' % err
122 122 pushop.ui.debug(msg)
123 123 try:
124 124 pushop.repo.checkpush(pushop)
125 125 lock = None
126 126 unbundle = pushop.remote.capable('unbundle')
127 127 if not unbundle:
128 128 lock = pushop.remote.lock()
129 129 try:
130 130 _pushdiscovery(pushop)
131 131 if _pushcheckoutgoing(pushop):
132 132 pushop.repo.prepushoutgoinghooks(pushop.repo,
133 133 pushop.remote,
134 134 pushop.outgoing)
135 135 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
136 136 False)
137 137 and pushop.remote.capable('bundle2-exp')):
138 138 _pushbundle2(pushop)
139 139 else:
140 140 _pushchangeset(pushop)
141 141 _pushcomputecommonheads(pushop)
142 142 _pushsyncphase(pushop)
143 143 _pushobsolete(pushop)
144 144 finally:
145 145 if lock is not None:
146 146 lock.release()
147 147 finally:
148 148 if locallock is not None:
149 149 locallock.release()
150 150
151 151 _pushbookmark(pushop)
152 152 return pushop.ret
153 153
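A minimal usage sketch for this entry point, assuming an existing local repo object and peer (the URL and hg.peer call are illustrative):

    # from mercurial import hg, exchange
    # other = hg.peer(repo, {}, 'ssh://example.com/repo')
    # ret = exchange.push(repo, other, force=False, revs=None)
    # ret follows the None / 0 / 1 / addchangegroup() convention above
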
154 154 def _pushdiscovery(pushop):
155 155 # discovery
156 156 unfi = pushop.repo.unfiltered()
157 157 fci = discovery.findcommonincoming
158 158 commoninc = fci(unfi, pushop.remote, force=pushop.force)
159 159 common, inc, remoteheads = commoninc
160 160 fco = discovery.findcommonoutgoing
161 161 outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
162 162 commoninc=commoninc, force=pushop.force)
163 163 pushop.outgoing = outgoing
164 164 pushop.remoteheads = remoteheads
165 165 pushop.incoming = inc
166 166
167 167 def _pushcheckoutgoing(pushop):
168 168 outgoing = pushop.outgoing
169 169 unfi = pushop.repo.unfiltered()
170 170 if not outgoing.missing:
171 171 # nothing to push
172 172 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
173 173 return False
174 174 # something to push
175 175 if not pushop.force:
176 176 # if repo.obsstore is empty there are no obsolete markers,
177 177 # so we can skip the iteration below entirely
178 178 if unfi.obsstore:
179 179 # these messages are defined here to stay within the 80-char limit
180 180 mso = _("push includes obsolete changeset: %s!")
181 181 mst = "push includes %s changeset: %s!"
182 182 # plain versions for i18n tool to detect them
183 183 _("push includes unstable changeset: %s!")
184 184 _("push includes bumped changeset: %s!")
185 185 _("push includes divergent changeset: %s!")
186 186 # If we are pushing and there is at least one
187 187 # obsolete or unstable changeset in missing, at
188 188 # least one of the missing heads will be obsolete or
189 189 # unstable. So checking heads only is OK.
190 190 for node in outgoing.missingheads:
191 191 ctx = unfi[node]
192 192 if ctx.obsolete():
193 193 raise util.Abort(mso % ctx)
194 194 elif ctx.troubled():
195 195 raise util.Abort(_(mst)
196 196 % (ctx.troubles()[0],
197 197 ctx))
198 198 newbm = pushop.ui.configlist('bookmarks', 'pushing')
199 199 discovery.checkheads(unfi, pushop.remote, outgoing,
200 200 pushop.remoteheads,
201 201 pushop.newbranch,
202 202 bool(pushop.incoming),
203 203 newbm)
204 204 return True
205 205
206 206 def _pushbundle2(pushop):
207 207 """push data to the remote using bundle2
208 208
209 209 The only currently supported type of data is changegroup but this will
210 210 evolve in the future."""
211 211 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
212 212 # create reply capability
213 213 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
214 214 bundler.newpart('b2x:replycaps', data=capsblob)
215 215 # Send known heads to the server for race detection.
216 216 if not pushop.force:
217 217 bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
218 218 extrainfo = _pushbundle2extraparts(pushop, bundler)
219 219 # add the changegroup bundle
220 220 cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
221 221 cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
222 222 stream = util.chunkbuffer(bundler.getchunks())
223 223 try:
224 224 reply = pushop.remote.unbundle(stream, ['force'], 'push')
225 225 except error.BundleValueError, exc:
226 226 raise util.Abort('missing support for %s' % exc)
227 227 try:
228 228 op = bundle2.processbundle(pushop.repo, reply)
229 229 except error.BundleValueError, exc:
230 230 raise util.Abort('missing support for %s' % exc)
231 231 cgreplies = op.records.getreplies(cgpart.id)
232 232 assert len(cgreplies['changegroup']) == 1
233 233 pushop.ret = cgreplies['changegroup'][0]['return']
234 234 _pushbundle2extrareply(pushop, op, extrainfo)
235 235
236 236 def _pushbundle2extraparts(pushop, bundler):
237 237 """hook function to let extensions add parts
238 238
239 239 Return a dict to let extensions pass data to the reply processing.
240 240 """
241 241 return {}
242 242
243 243 def _pushbundle2extrareply(pushop, op, extrainfo):
244 244 """hook function to let extensions react to part replies
245 245
246 246 The dict from _pushbundle2extrareply is fed to this function.
247 247 """
248 248 pass
249 249
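These two hooks let extensions ride along on a bundle2 push: add parts in _pushbundle2extraparts, stash whatever the reply handler will need in the returned dict, then inspect the server's reply in _pushbundle2extrareply. A hypothetical extension sketch, assuming mercurial's extensions.wrapfunction API (the part name and payload are made up):

    from mercurial import extensions, exchange

    def _extraparts(orig, pushop, bundler):
        extra = orig(pushop, bundler)
        part = bundler.newpart('b2x:myext:token', data='payload')
        extra['myext-part-id'] = part.id  # let the reply hook find our part
        return extra

    def _extrareply(orig, pushop, op, extrainfo):
        partid = extrainfo.get('myext-part-id')
        # inspect op.records.getreplies(partid) here
        return orig(pushop, op, extrainfo)

    def uisetup(ui):
        extensions.wrapfunction(exchange, '_pushbundle2extraparts', _extraparts)
        extensions.wrapfunction(exchange, '_pushbundle2extrareply', _extrareply)
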
250 250 def _pushchangeset(pushop):
251 251 """Make the actual push of changeset bundle to remote repo"""
252 252 outgoing = pushop.outgoing
253 253 unbundle = pushop.remote.capable('unbundle')
254 254 # TODO: get bundlecaps from remote
255 255 bundlecaps = None
256 256 # create a changegroup from local
257 257 if pushop.revs is None and not (outgoing.excluded
258 258 or pushop.repo.changelog.filteredrevs):
259 259 # push everything,
260 260 # use the fast path, no race possible on push
261 261 bundler = changegroup.bundle10(pushop.repo, bundlecaps)
262 262 cg = changegroup.getsubset(pushop.repo,
263 263 outgoing,
264 264 bundler,
265 265 'push',
266 266 fastpath=True)
267 267 else:
268 268 cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
269 269 bundlecaps)
270 270
271 271 # apply changegroup to remote
272 272 if unbundle:
273 273 # local repo finds heads on server, finds out what
274 274 # revs it must push. once revs transferred, if server
275 275 # finds it has different heads (someone else won
276 276 # commit/push race), server aborts.
277 277 if pushop.force:
278 278 remoteheads = ['force']
279 279 else:
280 280 remoteheads = pushop.remoteheads
281 281 # ssh: return remote's addchangegroup()
282 282 # http: return remote's addchangegroup() or 0 for error
283 283 pushop.ret = pushop.remote.unbundle(cg, remoteheads,
284 284 'push')
285 285 else:
286 286 # we return an integer indicating remote head count
287 287 # change
288 288 pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
289 289
290 290 def _pushcomputecommonheads(pushop):
291 291 unfi = pushop.repo.unfiltered()
292 292 if pushop.ret:
293 293 # push succeeded, synchronize the target of the push
294 294 cheads = pushop.outgoing.missingheads
295 295 elif pushop.revs is None:
296 296 # All-out push failed; synchronize all common heads
297 297 cheads = pushop.outgoing.commonheads
298 298 else:
299 299 # I want cheads = heads(::missingheads and ::commonheads)
300 300 # (missingheads is revs with secret changeset filtered out)
301 301 #
302 302 # This can be expressed as:
303 303 # cheads = ( (missingheads and ::commonheads)
304 304 # + (commonheads and ::missingheads))"
305 305 # )
306 306 #
307 307 # while trying to push we already computed the following:
308 308 # common = (::commonheads)
309 309 # missing = ((commonheads::missingheads) - commonheads)
310 310 #
311 311 # We can pick:
312 312 # * missingheads part of common (::commonheads)
313 313 common = set(pushop.outgoing.common)
314 314 nm = pushop.repo.changelog.nodemap
315 315 cheads = [node for node in pushop.revs if nm[node] in common]
316 316 # and
317 317 # * commonheads parents on missing
318 318 revset = unfi.set('%ln and parents(roots(%ln))',
319 319 pushop.outgoing.commonheads,
320 320 pushop.outgoing.missing)
321 321 cheads.extend(c.node() for c in revset)
322 322 pushop.commonheads = cheads
323 323
324 324 def _pushsyncphase(pushop):
325 325 """synchronise phase information locally and remotely"""
326 326 unfi = pushop.repo.unfiltered()
327 327 cheads = pushop.commonheads
328 328 # even when we don't push, exchanging phase data is useful
329 329 remotephases = pushop.remote.listkeys('phases')
330 330 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
331 331 and remotephases # server supports phases
332 332 and pushop.ret is None # nothing was pushed
333 333 and remotephases.get('publishing', False)):
334 334 # When:
335 335 # - this is a subrepo push
336 336 # - and the remote supports phases
337 337 # - and no changeset was pushed
338 338 # - and remote is publishing
339 339 # We may be in issue 3871 case!
340 340 # We drop the phase synchronisation that would otherwise be
341 341 # done as a courtesy, since it could publish changesets that
342 342 # are still draft locally.
343 343 remotephases = {'publishing': 'True'}
344 344 if not remotephases: # old server, or public-only reply from non-publishing
345 345 _localphasemove(pushop, cheads)
346 346 # don't push any phase data as there is nothing to push
347 347 else:
348 348 ana = phases.analyzeremotephases(pushop.repo, cheads,
349 349 remotephases)
350 350 pheads, droots = ana
351 351 ### Apply remote phase on local
352 352 if remotephases.get('publishing', False):
353 353 _localphasemove(pushop, cheads)
354 354 else: # publish = False
355 355 _localphasemove(pushop, pheads)
356 356 _localphasemove(pushop, cheads, phases.draft)
357 357 ### Apply local phase on remote
358 358
359 359 # Get the list of all revs that are draft on the remote but public here.
360 360 # XXX Beware that the revset breaks if droots is not strictly a set of
361 361 # XXX roots; we may want to ensure it is, but that is costly
362 362 outdated = unfi.set('heads((%ln::%ln) and public())',
363 363 droots, cheads)
364 364 for newremotehead in outdated:
365 365 r = pushop.remote.pushkey('phases',
366 366 newremotehead.hex(),
367 367 str(phases.draft),
368 368 str(phases.public))
369 369 if not r:
370 370 pushop.ui.warn(_('updating %s to public failed!\n')
371 371 % newremotehead)
372 372
373 373 def _localphasemove(pushop, nodes, phase=phases.public):
374 374 """move <nodes> to <phase> in the local source repo"""
375 375 if pushop.locallocked:
376 376 phases.advanceboundary(pushop.repo, phase, nodes)
377 377 else:
378 378 # repo is not locked, do not change any phases!
379 379 # Informs the user that phases should have been moved when
380 380 # applicable.
381 381 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
382 382 phasestr = phases.phasenames[phase]
383 383 if actualmoves:
384 384 pushop.ui.status(_('cannot lock source repo, skipping '
385 385 'local %s phase update\n') % phasestr)
386 386
387 387 def _pushobsolete(pushop):
388 388 """utility function to push obsolete markers to a remote"""
389 389 pushop.ui.debug('try to push obsolete markers to remote\n')
390 390 repo = pushop.repo
391 391 remote = pushop.remote
392 392 if (obsolete._enabled and repo.obsstore and
393 393 'obsolete' in remote.listkeys('namespaces')):
394 394 rslts = []
395 395 remotedata = repo.listkeys('obsolete')
396 396 for key in sorted(remotedata, reverse=True):
397 397 # reverse sort to ensure we end with dump0
398 398 data = remotedata[key]
399 399 rslts.append(remote.pushkey('obsolete', key, '', data))
400 400 if [r for r in rslts if not r]:
401 401 msg = _('failed to push some obsolete markers!\n')
402 402 repo.ui.warn(msg)
403 403
404 404 def _pushbookmark(pushop):
405 405 """Update bookmark position on remote"""
406 406 ui = pushop.ui
407 407 repo = pushop.repo.unfiltered()
408 408 remote = pushop.remote
409 409 ui.debug("checking for updated bookmarks\n")
410 410 revnums = map(repo.changelog.rev, pushop.revs or [])
411 411 ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
412 412 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
413 413 ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
414 414 srchex=hex)
415 415
416 416 for b, scid, dcid in advsrc:
417 417 if ancestors and repo[scid].rev() not in ancestors:
418 418 continue
419 419 if remote.pushkey('bookmarks', b, dcid, scid):
420 420 ui.status(_("updating bookmark %s\n") % b)
421 421 else:
422 422 ui.warn(_('updating bookmark %s failed!\n') % b)
423 423
424 424 class pulloperation(object):
425 425 """A object that represent a single pull operation
426 426
427 427 It purpose is to carry push related state and very common operation.
428 428
429 429 A new should be created at the beginning of each pull and discarded
430 430 afterward.
431 431 """
432 432
433 433 def __init__(self, repo, remote, heads=None, force=False):
434 434 # repo we pull into
435 435 self.repo = repo
436 436 # repo we pull from
437 437 self.remote = remote
438 438 # revisions we try to pull (None is "all")
439 439 self.heads = heads
440 440 # do we force pull?
441 441 self.force = force
442 442 # the name of the pull transaction
443 443 self._trname = 'pull\n' + util.hidepassword(remote.url())
444 444 # hold the transaction once created
445 445 self._tr = None
446 446 # set of common changesets between local and remote before pull
447 447 self.common = None
448 448 # set of pulled heads
449 449 self.rheads = None
450 450 # list of missing changesets to fetch remotely
451 451 self.fetch = None
452 452 # result of changegroup pulling (used as return code by pull)
453 453 self.cgresult = None
454 454 # set of steps remaining to do (related to future bundle2 usage)
455 455 self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])
456 456
457 457 @util.propertycache
458 458 def pulledsubset(self):
459 459 """heads of the set of changeset target by the pull"""
460 460 # compute target subset
461 461 if self.heads is None:
462 462 # We pulled everything possible
463 463 # sync on everything common
464 464 c = set(self.common)
465 465 ret = list(self.common)
466 466 for n in self.rheads:
467 467 if n not in c:
468 468 ret.append(n)
469 469 return ret
470 470 else:
471 471 # We pulled a specific subset
472 472 # sync on this subset
473 473 return self.heads
474 474
475 475 def gettransaction(self):
476 476 """get appropriate pull transaction, creating it if needed"""
477 477 if self._tr is None:
478 478 self._tr = self.repo.transaction(self._trname)
479 479 return self._tr
480 480
481 481 def closetransaction(self):
482 482 """close transaction if created"""
483 483 if self._tr is not None:
484 484 self._tr.close()
485 485
486 486 def releasetransaction(self):
487 487 """release transaction if created"""
488 488 if self._tr is not None:
489 489 self._tr.release()
490 490
491 491 def pull(repo, remote, heads=None, force=False):
492 492 pullop = pulloperation(repo, remote, heads, force)
493 493 if pullop.remote.local():
494 494 missing = set(pullop.remote.requirements) - pullop.repo.supported
495 495 if missing:
496 496 msg = _("required features are not"
497 497 " supported in the destination:"
498 498 " %s") % (', '.join(sorted(missing)))
499 499 raise util.Abort(msg)
500 500
501 501 lock = pullop.repo.lock()
502 502 try:
503 503 _pulldiscovery(pullop)
504 504 if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
505 505 and pullop.remote.capable('bundle2-exp')):
506 506 _pullbundle2(pullop)
507 507 if 'changegroup' in pullop.todosteps:
508 508 _pullchangeset(pullop)
509 509 if 'phases' in pullop.todosteps:
510 510 _pullphase(pullop)
511 511 if 'obsmarkers' in pullop.todosteps:
512 512 _pullobsolete(pullop)
513 513 pullop.closetransaction()
514 514 finally:
515 515 pullop.releasetransaction()
516 516 lock.release()
517 517
518 518 return pullop.cgresult
519 519
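As with push, a minimal sketch of calling this entry point (repo and peer assumed to exist; illustrative only):

    # from mercurial import hg, exchange
    # other = hg.peer(repo, {}, 'https://example.com/repo')
    # cgresult = exchange.pull(repo, other, heads=None, force=False)
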
520 520 def _pulldiscovery(pullop):
521 521 """discovery phase for the pull
522 522
523 523 Currently handles changeset discovery only; this will change to handle
524 524 all discovery at some point."""
525 525 tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
526 526 pullop.remote,
527 527 heads=pullop.heads,
528 528 force=pullop.force)
529 529 pullop.common, pullop.fetch, pullop.rheads = tmp
530 530
531 531 def _pullbundle2(pullop):
532 532 """pull data using bundle2
533 533
534 534 For now, the only supported data is the changegroup."""
535 535 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
536 536 # pulling changegroup
537 537 pullop.todosteps.remove('changegroup')
538 538
539 539 kwargs['common'] = pullop.common
540 540 kwargs['heads'] = pullop.heads or pullop.rheads
541 541 if not pullop.fetch:
542 542 pullop.repo.ui.status(_("no changes found\n"))
543 543 pullop.cgresult = 0
544 544 else:
545 545 if pullop.heads is None and list(pullop.common) == [nullid]:
546 546 pullop.repo.ui.status(_("requesting all changes\n"))
547 547 _pullbundle2extraprepare(pullop, kwargs)
548 548 if kwargs.keys() == ['format']:
549 549 return # nothing to pull
550 550 bundle = pullop.remote.getbundle('pull', **kwargs)
551 551 try:
552 552 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
553 553 except error.BundleValueError, exc:
554 554 raise util.Abort('missing support for %s' % exc)
555 555
556 556 if pullop.fetch:
557 557 assert len(op.records['changegroup']) == 1
558 558 pullop.cgresult = op.records['changegroup'][0]['return']
559 559
560 560 def _pullbundle2extraprepare(pullop, kwargs):
561 561 """hook function so that extensions can extend the getbundle call"""
562 562 pass
563 563
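This hook is the natural client-side driver for the new listkeys support: an extension (or later core code) can add pushkey namespaces to the getbundle keyword arguments so the server ships them back as b2x:listkeys parts. A hypothetical override (the namespaces chosen are just examples):

    def _pullbundle2extraprepare(pullop, kwargs):
        # ask the server for these namespaces in the same bundle2 stream
        kwargs['listkeys'] = ['phases', 'bookmarks']
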
564 564 def _pullchangeset(pullop):
565 565 """pull changeset from unbundle into the local repo"""
566 566 # We delay the open of the transaction as late as possible so we
567 567 # don't open transaction for nothing or you break future useful
568 568 # rollback call
569 569 pullop.todosteps.remove('changegroup')
570 570 if not pullop.fetch:
571 571 pullop.repo.ui.status(_("no changes found\n"))
572 572 pullop.cgresult = 0
573 573 return
574 574 pullop.gettransaction()
575 575 if pullop.heads is None and list(pullop.common) == [nullid]:
576 576 pullop.repo.ui.status(_("requesting all changes\n"))
577 577 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
578 578 # issue1320, avoid a race if remote changed after discovery
579 579 pullop.heads = pullop.rheads
580 580
581 581 if pullop.remote.capable('getbundle'):
582 582 # TODO: get bundlecaps from remote
583 583 cg = pullop.remote.getbundle('pull', common=pullop.common,
584 584 heads=pullop.heads or pullop.rheads)
585 585 elif pullop.heads is None:
586 586 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
587 587 elif not pullop.remote.capable('changegroupsubset'):
588 588 raise util.Abort(_("partial pull cannot be done because "
589 589 "other repository doesn't support "
590 590 "changegroupsubset."))
591 591 else:
592 592 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
593 593 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
594 594 pullop.remote.url())
595 595
596 596 def _pullphase(pullop):
597 597 # Get remote phases data from remote
598 598 remotephases = pullop.remote.listkeys('phases')
599 599 _pullapplyphases(pullop, remotephases)
600 600
601 601 def _pullapplyphases(pullop, remotephases):
602 602 """apply phase movement from observed remote state"""
603 603 pullop.todosteps.remove('phases')
604 604 publishing = bool(remotephases.get('publishing', False))
605 605 if remotephases and not publishing:
606 606 # remote is new and non-publishing
607 607 pheads, _dr = phases.analyzeremotephases(pullop.repo,
608 608 pullop.pulledsubset,
609 609 remotephases)
610 610 phases.advanceboundary(pullop.repo, phases.public, pheads)
611 611 phases.advanceboundary(pullop.repo, phases.draft,
612 612 pullop.pulledsubset)
613 613 else:
614 614 # Remote is old or publishing; all common changesets
615 615 # should be seen as public
616 616 phases.advanceboundary(pullop.repo, phases.public,
617 617 pullop.pulledsubset)
618 618
619 619 def _pullobsolete(pullop):
620 620 """utility function to pull obsolete markers from a remote
621 621
622 622 `gettransaction` is a function that returns the pull transaction, creating
623 623 one if necessary. We return the transaction to inform the calling code that
624 624 a new transaction has been created (when applicable).
625 625
626 626 Exists mostly to allow overriding for experimentation purposes"""
627 627 pullop.todosteps.remove('obsmarkers')
628 628 tr = None
629 629 if obsolete._enabled:
630 630 pullop.repo.ui.debug('fetching remote obsolete markers\n')
631 631 remoteobs = pullop.remote.listkeys('obsolete')
632 632 if 'dump0' in remoteobs:
633 633 tr = pullop.gettransaction()
634 634 for key in sorted(remoteobs, reverse=True):
635 635 if key.startswith('dump'):
636 636 data = base85.b85decode(remoteobs[key])
637 637 pullop.repo.obsstore.mergemarkers(tr, data)
638 638 pullop.repo.invalidatevolatilesets()
639 639 return tr
640 640
641 641 def caps20to10(repo):
642 642 """return a set with appropriate options to use bundle20 during getbundle"""
643 643 caps = set(['HG2X'])
644 644 capsblob = bundle2.encodecaps(repo.bundle2caps)
645 645 caps.add('bundle2=' + urllib.quote(capsblob))
646 646 return caps
647 647
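Concretely, the set built above pairs the raw 'HG2X' marker with a url-quoted blob of the repo's bundle2 capabilities. A rough illustration with a made-up caps blob (the exact encodecaps layout is not shown in this diff):

    import urllib

    capsblob = 'HG2X\nb2x:listkeys'  # assumed blob layout, for illustration
    caps = set(['HG2X', 'bundle2=' + urllib.quote(capsblob)])
    # -> set(['HG2X', 'bundle2=HG2X%0Ab2x%3Alistkeys'])
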
648 648 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
649 649 **kwargs):
650 650 """return a full bundle (with potentially multiple kind of parts)
651 651
652 652 Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
653 653 passed. For now, the bundle can contain only changegroup, but this will
654 654 changes when more part type will be available for bundle2.
655 655
656 656 This is different from changegroup.getbundle that only returns an HG10
657 657 changegroup bundle. They may eventually get reunited in the future when we
658 658 have a clearer idea of the API we what to query different data.
659 659
660 660 The implementation is at a very early stage and will get massive rework
661 661 when the API of bundle is refined.
662 662 """
663 663 # build changegroup bundle here.
664 664 cg = changegroup.getbundle(repo, source, heads=heads,
665 665 common=common, bundlecaps=bundlecaps)
666 666 if bundlecaps is None or 'HG2X' not in bundlecaps:
667 667 if kwargs:
668 668 raise ValueError(_('unsupported getbundle arguments: %s')
669 669 % ', '.join(sorted(kwargs.keys())))
670 670 return cg
671 671 # very crude first implementation,
672 672 # the bundle API will change and the generation will be done lazily.
673 673 b2caps = {}
674 674 for bcaps in bundlecaps:
675 675 if bcaps.startswith('bundle2='):
676 676 blob = urllib.unquote(bcaps[len('bundle2='):])
677 677 b2caps.update(bundle2.decodecaps(blob))
678 678 bundler = bundle2.bundle20(repo.ui, b2caps)
679 679 if cg:
680 680 bundler.newpart('b2x:changegroup', data=cg.getchunks())
681 listkeys = kwargs.get('listkeys', ())
682 for namespace in listkeys:
683 part = bundler.newpart('b2x:listkeys')
684 part.addparam('namespace', namespace)
685 keys = repo.listkeys(namespace).items()
686 part.data = pushkey.encodekeys(keys)
681 687 _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
682 688 bundlecaps=bundlecaps, **kwargs)
683 689 return util.chunkbuffer(bundler.getchunks())
684 690
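The listkeys loop above is the server half of this change; a client receiving the bundle would walk the parts and decode each b2x:listkeys payload back into a dict. A hedged sketch of that counterpart (not part of this commit; the part-iteration API is assumed from the bundle2 module of this era):

    def decodekeys(data):
        # inverse of pushkey.encodekeys: one 'key<TAB>value' pair per line
        result = {}
        for line in data.splitlines():
            k, v = line.split('\t')
            result[k] = v
        return result

    # for part in unbundler.iterparts():
    #     if part.type == 'b2x:listkeys':
    #         namespace = part.params['namespace']
    #         keys = decodekeys(part.read())
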
685 691 def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
686 692 bundlecaps=None, **kwargs):
687 693 """hook function to let extensions add parts to the requested bundle"""
688 694 pass
689 695
690 696 def check_heads(repo, their_heads, context):
691 697 """check if the heads of a repo have been modified
692 698
693 699 Used by peer for unbundling.
694 700 """
695 701 heads = repo.heads()
696 702 heads_hash = util.sha1(''.join(sorted(heads))).digest()
697 703 if not (their_heads == ['force'] or their_heads == heads or
698 704 their_heads == ['hashed', heads_hash]):
699 705 # someone else committed/pushed/unbundled while we
700 706 # were transferring data
701 707 raise error.PushRaced('repository changed while %s - '
702 708 'please try again' % context)
703 709
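The 'hashed' form accepted above lets a client send a fixed-size digest instead of the full head list. A standalone sketch of computing it, mirroring the util.sha1 call in check_heads:

    import hashlib

    def hashheads(heads):
        # heads: iterable of 20-byte binary node ids
        return hashlib.sha1(''.join(sorted(heads))).digest()

    # a client would then send ['hashed', hashheads(observed_heads)]
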
704 710 def unbundle(repo, cg, heads, source, url):
705 711 """Apply a bundle to a repo.
706 712
707 713 This function makes sure the repo is locked during the application and has
708 714 a mechanism to check that no push race occurred between the creation of the
709 715 bundle and its application.
710 716
711 717 If the push was raced, a PushRaced exception is raised."""
712 718 r = 0
713 719 # need a transaction when processing a bundle2 stream
714 720 tr = None
715 721 lock = repo.lock()
716 722 try:
717 723 check_heads(repo, heads, 'uploading changes')
718 724 # push can proceed
719 725 if util.safehasattr(cg, 'params'):
720 726 try:
721 727 tr = repo.transaction('unbundle')
722 728 tr.hookargs['bundle2-exp'] = '1'
723 729 r = bundle2.processbundle(repo, cg, lambda: tr).reply
724 730 cl = repo.unfiltered().changelog
725 731 p = cl.writepending() and repo.root or ""
726 732 repo.hook('b2x-pretransactionclose', throw=True, source=source,
727 733 url=url, pending=p, **tr.hookargs)
728 734 tr.close()
729 735 repo.hook('b2x-transactionclose', source=source, url=url,
730 736 **tr.hookargs)
731 737 except Exception, exc:
732 738 exc.duringunbundle2 = True
733 739 raise
734 740 else:
735 741 r = changegroup.addchangegroup(repo, cg, source, url)
736 742 finally:
737 743 if tr is not None:
738 744 tr.release()
739 745 lock.release()
740 746 return r
@@ -1,1773 +1,1774 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 propertycache = util.propertycache
22 22 filecache = scmutil.filecache
23 23
24 24 class repofilecache(filecache):
25 25 """All filecache usage on repo are done for logic that should be unfiltered
26 26 """
27 27
28 28 def __get__(self, repo, type=None):
29 29 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 30 def __set__(self, repo, value):
31 31 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 32 def __delete__(self, repo):
33 33 return super(repofilecache, self).__delete__(repo.unfiltered())
34 34
35 35 class storecache(repofilecache):
36 36 """filecache for files in the store"""
37 37 def join(self, obj, fname):
38 38 return obj.sjoin(fname)
39 39
40 40 class unfilteredpropertycache(propertycache):
41 41 """propertycache that apply to unfiltered repo only"""
42 42
43 43 def __get__(self, repo, type=None):
44 44 unfi = repo.unfiltered()
45 45 if unfi is repo:
46 46 return super(unfilteredpropertycache, self).__get__(unfi)
47 47 return getattr(unfi, self.name)
48 48
49 49 class filteredpropertycache(propertycache):
50 50 """propertycache that must take filtering in account"""
51 51
52 52 def cachevalue(self, obj, value):
53 53 object.__setattr__(obj, self.name, value)
54 54
55 55
56 56 def hasunfilteredcache(repo, name):
57 57 """check if a repo has an unfilteredpropertycache value for <name>"""
58 58 return name in vars(repo.unfiltered())
59 59
60 60 def unfilteredmethod(orig):
61 61 """decorate method that always need to be run on unfiltered version"""
62 62 def wrapper(repo, *args, **kwargs):
63 63 return orig(repo.unfiltered(), *args, **kwargs)
64 64 return wrapper
65 65
66 66 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 67 'unbundle'))
68 68 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 69
70 70 class localpeer(peer.peerrepository):
71 71 '''peer for a local repo; reflects only the most recent API'''
72 72
73 73 def __init__(self, repo, caps=moderncaps):
74 74 peer.peerrepository.__init__(self)
75 75 self._repo = repo.filtered('served')
76 76 self.ui = repo.ui
77 77 self._caps = repo._restrictcapabilities(caps)
78 78 self.requirements = repo.requirements
79 79 self.supportedformats = repo.supportedformats
80 80
81 81 def close(self):
82 82 self._repo.close()
83 83
84 84 def _capabilities(self):
85 85 return self._caps
86 86
87 87 def local(self):
88 88 return self._repo
89 89
90 90 def canpush(self):
91 91 return True
92 92
93 93 def url(self):
94 94 return self._repo.url()
95 95
96 96 def lookup(self, key):
97 97 return self._repo.lookup(key)
98 98
99 99 def branchmap(self):
100 100 return self._repo.branchmap()
101 101
102 102 def heads(self):
103 103 return self._repo.heads()
104 104
105 105 def known(self, nodes):
106 106 return self._repo.known(nodes)
107 107
108 108 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 109 format='HG10', **kwargs):
110 110 cg = exchange.getbundle(self._repo, source, heads=heads,
111 111 common=common, bundlecaps=bundlecaps, **kwargs)
112 112 if bundlecaps is not None and 'HG2X' in bundlecaps:
113 113 # When requesting a bundle2, getbundle returns a stream to make the
114 114 # wire-level function happier. We need to build a proper object
115 115 # from it in the local peer.
116 116 cg = bundle2.unbundle20(self.ui, cg)
117 117 return cg
118 118
119 119 # TODO We might want to move the next two calls into legacypeer and add
120 120 # unbundle instead.
121 121
122 122 def unbundle(self, cg, heads, url):
123 123 """apply a bundle on a repo
124 124
125 125 This function handles the repo locking itself."""
126 126 try:
127 127 cg = exchange.readbundle(self.ui, cg, None)
128 128 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
129 129 if util.safehasattr(ret, 'getchunks'):
130 130 # This is a bundle20 object, turn it into an unbundler.
131 131 # This little dance should be dropped eventually when the API
132 132 # is finally improved.
133 133 stream = util.chunkbuffer(ret.getchunks())
134 134 ret = bundle2.unbundle20(self.ui, stream)
135 135 return ret
136 136 except error.PushRaced, exc:
137 137 raise error.ResponseError(_('push failed:'), str(exc))
138 138
139 139 def lock(self):
140 140 return self._repo.lock()
141 141
142 142 def addchangegroup(self, cg, source, url):
143 143 return changegroup.addchangegroup(self._repo, cg, source, url)
144 144
145 145 def pushkey(self, namespace, key, old, new):
146 146 return self._repo.pushkey(namespace, key, old, new)
147 147
148 148 def listkeys(self, namespace):
149 149 return self._repo.listkeys(namespace)
150 150
151 151 def debugwireargs(self, one, two, three=None, four=None, five=None):
152 152 '''used to test argument passing over the wire'''
153 153 return "%s %s %s %s %s" % (one, two, three, four, five)
154 154
155 155 class locallegacypeer(localpeer):
156 156 '''peer extension which implements legacy methods too; used for tests with
157 157 restricted capabilities'''
158 158
159 159 def __init__(self, repo):
160 160 localpeer.__init__(self, repo, caps=legacycaps)
161 161
162 162 def branches(self, nodes):
163 163 return self._repo.branches(nodes)
164 164
165 165 def between(self, pairs):
166 166 return self._repo.between(pairs)
167 167
168 168 def changegroup(self, basenodes, source):
169 169 return changegroup.changegroup(self._repo, basenodes, source)
170 170
171 171 def changegroupsubset(self, bases, heads, source):
172 172 return changegroup.changegroupsubset(self._repo, bases, heads, source)
173 173
174 174 class localrepository(object):
175 175
176 176 supportedformats = set(('revlogv1', 'generaldelta'))
177 177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
178 178 'dotencode'))
179 179 openerreqs = set(('revlogv1', 'generaldelta'))
180 180 requirements = ['revlogv1']
181 181 filtername = None
182 182
183 bundle2caps = {'HG2X': ()}
183 bundle2caps = {'HG2X': (),
184 'b2x:listkeys': ()}
184 185
185 186 # a list of (ui, featureset) functions.
186 187 # only functions defined in module of enabled extensions are invoked
187 188 featuresetupfuncs = set()
188 189
189 190 def _baserequirements(self, create):
190 191 return self.requirements[:]
191 192
192 193 def __init__(self, baseui, path=None, create=False):
193 194 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
194 195 self.wopener = self.wvfs
195 196 self.root = self.wvfs.base
196 197 self.path = self.wvfs.join(".hg")
197 198 self.origroot = path
198 199 self.auditor = pathutil.pathauditor(self.root, self._checknested)
199 200 self.vfs = scmutil.vfs(self.path)
200 201 self.opener = self.vfs
201 202 self.baseui = baseui
202 203 self.ui = baseui.copy()
203 204 self.ui.copy = baseui.copy # prevent copying repo configuration
204 205 # A list of callbacks to shape the phases if no data were found.
205 206 # Callbacks are in the form: func(repo, roots) --> processed roots.
206 207 # This list is to be filled by extensions during repo setup
207 208 self._phasedefaults = []
208 209 try:
209 210 self.ui.readconfig(self.join("hgrc"), self.root)
210 211 extensions.loadall(self.ui)
211 212 except IOError:
212 213 pass
213 214
214 215 if self.featuresetupfuncs:
215 216 self.supported = set(self._basesupported) # use private copy
216 217 extmods = set(m.__name__ for n, m
217 218 in extensions.extensions(self.ui))
218 219 for setupfunc in self.featuresetupfuncs:
219 220 if setupfunc.__module__ in extmods:
220 221 setupfunc(self.ui, self.supported)
221 222 else:
222 223 self.supported = self._basesupported
223 224
224 225 if not self.vfs.isdir():
225 226 if create:
226 227 if not self.wvfs.exists():
227 228 self.wvfs.makedirs()
228 229 self.vfs.makedir(notindexed=True)
229 230 requirements = self._baserequirements(create)
230 231 if self.ui.configbool('format', 'usestore', True):
231 232 self.vfs.mkdir("store")
232 233 requirements.append("store")
233 234 if self.ui.configbool('format', 'usefncache', True):
234 235 requirements.append("fncache")
235 236 if self.ui.configbool('format', 'dotencode', True):
236 237 requirements.append('dotencode')
237 238 # create an invalid changelog
238 239 self.vfs.append(
239 240 "00changelog.i",
240 241 '\0\0\0\2' # represents revlogv2
241 242 ' dummy changelog to prevent using the old repo layout'
242 243 )
243 244 if self.ui.configbool('format', 'generaldelta', False):
244 245 requirements.append("generaldelta")
245 246 requirements = set(requirements)
246 247 else:
247 248 raise error.RepoError(_("repository %s not found") % path)
248 249 elif create:
249 250 raise error.RepoError(_("repository %s already exists") % path)
250 251 else:
251 252 try:
252 253 requirements = scmutil.readrequires(self.vfs, self.supported)
253 254 except IOError, inst:
254 255 if inst.errno != errno.ENOENT:
255 256 raise
256 257 requirements = set()
257 258
258 259 self.sharedpath = self.path
259 260 try:
260 261 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
261 262 realpath=True)
262 263 s = vfs.base
263 264 if not vfs.exists():
264 265 raise error.RepoError(
265 266 _('.hg/sharedpath points to nonexistent directory %s') % s)
266 267 self.sharedpath = s
267 268 except IOError, inst:
268 269 if inst.errno != errno.ENOENT:
269 270 raise
270 271
271 272 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
272 273 self.spath = self.store.path
273 274 self.svfs = self.store.vfs
274 275 self.sopener = self.svfs
275 276 self.sjoin = self.store.join
276 277 self.vfs.createmode = self.store.createmode
277 278 self._applyrequirements(requirements)
278 279 if create:
279 280 self._writerequirements()
280 281
281 282
282 283 self._branchcaches = {}
283 284 self.filterpats = {}
284 285 self._datafilters = {}
285 286 self._transref = self._lockref = self._wlockref = None
286 287
287 288 # A cache for various files under .hg/ that tracks file changes
288 289 # (used by the filecache decorator)
289 290 #
290 291 # Maps a property name to its util.filecacheentry
291 292 self._filecache = {}
292 293
293 294 # hold sets of revisions to be filtered
294 295 # should be cleared when something might have changed the filter value:
295 296 # - new changesets,
296 297 # - phase change,
297 298 # - new obsolescence marker,
298 299 # - working directory parent change,
299 300 # - bookmark changes
300 301 self.filteredrevcache = {}
301 302
302 303 def close(self):
303 304 pass
304 305
305 306 def _restrictcapabilities(self, caps):
306 307 # bundle2 is not ready for prime time, drop it unless explicitly
307 308 # required by the tests (or some brave tester)
308 309 if self.ui.configbool('experimental', 'bundle2-exp', False):
309 310 caps = set(caps)
310 311 capsblob = bundle2.encodecaps(self.bundle2caps)
311 312 caps.add('bundle2-exp=' + urllib.quote(capsblob))
312 313 return caps
313 314
314 315 def _applyrequirements(self, requirements):
315 316 self.requirements = requirements
316 317 self.sopener.options = dict((r, 1) for r in requirements
317 318 if r in self.openerreqs)
318 319 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
319 320 if chunkcachesize is not None:
320 321 self.sopener.options['chunkcachesize'] = chunkcachesize
321 322
322 323 def _writerequirements(self):
323 324 reqfile = self.opener("requires", "w")
324 325 for r in sorted(self.requirements):
325 326 reqfile.write("%s\n" % r)
326 327 reqfile.close()
327 328
328 329 def _checknested(self, path):
329 330 """Determine if path is a legal nested repository."""
330 331 if not path.startswith(self.root):
331 332 return False
332 333 subpath = path[len(self.root) + 1:]
333 334 normsubpath = util.pconvert(subpath)
334 335
335 336 # XXX: Checking against the current working copy is wrong in
336 337 # the sense that it can reject things like
337 338 #
338 339 # $ hg cat -r 10 sub/x.txt
339 340 #
340 341 # if sub/ is no longer a subrepository in the working copy
341 342 # parent revision.
342 343 #
343 344 # However, it can of course also allow things that would have
344 345 # been rejected before, such as the above cat command if sub/
345 346 # is a subrepository now, but was a normal directory before.
346 347 # The old path auditor would have rejected by mistake since it
347 348 # panics when it sees sub/.hg/.
348 349 #
349 350 # All in all, checking against the working copy seems sensible
350 351 # since we want to prevent access to nested repositories on
351 352 # the filesystem *now*.
352 353 ctx = self[None]
353 354 parts = util.splitpath(subpath)
354 355 while parts:
355 356 prefix = '/'.join(parts)
356 357 if prefix in ctx.substate:
357 358 if prefix == normsubpath:
358 359 return True
359 360 else:
360 361 sub = ctx.sub(prefix)
361 362 return sub.checknested(subpath[len(prefix) + 1:])
362 363 else:
363 364 parts.pop()
364 365 return False
365 366
366 367 def peer(self):
367 368 return localpeer(self) # not cached to avoid reference cycle
368 369
369 370 def unfiltered(self):
370 371 """Return unfiltered version of the repository
371 372
372 373 Intended to be overwritten by filtered repo."""
373 374 return self
374 375
375 376 def filtered(self, name):
376 377 """Return a filtered version of a repository"""
377 378 # build a new class with the mixin and the current class
378 379 # (possibly subclass of the repo)
379 380 class proxycls(repoview.repoview, self.unfiltered().__class__):
380 381 pass
381 382 return proxycls(self, name)
382 383
383 384 @repofilecache('bookmarks')
384 385 def _bookmarks(self):
385 386 return bookmarks.bmstore(self)
386 387
387 388 @repofilecache('bookmarks.current')
388 389 def _bookmarkcurrent(self):
389 390 return bookmarks.readcurrent(self)
390 391
391 392 def bookmarkheads(self, bookmark):
392 393 name = bookmark.split('@', 1)[0]
393 394 heads = []
394 395 for mark, n in self._bookmarks.iteritems():
395 396 if mark.split('@', 1)[0] == name:
396 397 heads.append(n)
397 398 return heads
398 399
399 400 @storecache('phaseroots')
400 401 def _phasecache(self):
401 402 return phases.phasecache(self, self._phasedefaults)
402 403
403 404 @storecache('obsstore')
404 405 def obsstore(self):
405 406 store = obsolete.obsstore(self.sopener)
406 407 if store and not obsolete._enabled:
407 408 # message is rare enough to not be translated
408 409 msg = 'obsolete feature not enabled but %i markers found!\n'
409 410 self.ui.warn(msg % len(list(store)))
410 411 return store
411 412
412 413 @storecache('00changelog.i')
413 414 def changelog(self):
414 415 c = changelog.changelog(self.sopener)
415 416 if 'HG_PENDING' in os.environ:
416 417 p = os.environ['HG_PENDING']
417 418 if p.startswith(self.root):
418 419 c.readpending('00changelog.i.a')
419 420 return c
420 421
421 422 @storecache('00manifest.i')
422 423 def manifest(self):
423 424 return manifest.manifest(self.sopener)
424 425
425 426 @repofilecache('dirstate')
426 427 def dirstate(self):
427 428 warned = [0]
428 429 def validate(node):
429 430 try:
430 431 self.changelog.rev(node)
431 432 return node
432 433 except error.LookupError:
433 434 if not warned[0]:
434 435 warned[0] = True
435 436 self.ui.warn(_("warning: ignoring unknown"
436 437 " working parent %s!\n") % short(node))
437 438 return nullid
438 439
439 440 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
440 441
441 442 def __getitem__(self, changeid):
442 443 if changeid is None:
443 444 return context.workingctx(self)
444 445 return context.changectx(self, changeid)
445 446
446 447 def __contains__(self, changeid):
447 448 try:
448 449 return bool(self.lookup(changeid))
449 450 except error.RepoLookupError:
450 451 return False
451 452
452 453 def __nonzero__(self):
453 454 return True
454 455
455 456 def __len__(self):
456 457 return len(self.changelog)
457 458
458 459 def __iter__(self):
459 460 return iter(self.changelog)
460 461
461 462 def revs(self, expr, *args):
462 463 '''Return a list of revisions matching the given revset'''
463 464 expr = revset.formatspec(expr, *args)
464 465 m = revset.match(None, expr)
465 466 return m(self, revset.spanset(self))
466 467
467 468 def set(self, expr, *args):
468 469 '''
469 470 Yield a context for each matching revision, after doing arg
470 471 replacement via revset.formatspec
471 472 '''
472 473 for r in self.revs(expr, *args):
473 474 yield self[r]
474 475
475 476 def url(self):
476 477 return 'file:' + self.root
477 478
478 479 def hook(self, name, throw=False, **args):
479 480 return hook.hook(self.ui, self, name, throw, **args)
480 481
481 482 @unfilteredmethod
482 483 def _tag(self, names, node, message, local, user, date, extra={},
483 484 editor=False):
484 485 if isinstance(names, str):
485 486 names = (names,)
486 487
487 488 branches = self.branchmap()
488 489 for name in names:
489 490 self.hook('pretag', throw=True, node=hex(node), tag=name,
490 491 local=local)
491 492 if name in branches:
492 493 self.ui.warn(_("warning: tag %s conflicts with existing"
493 494 " branch name\n") % name)
494 495
495 496 def writetags(fp, names, munge, prevtags):
496 497 fp.seek(0, 2)
497 498 if prevtags and prevtags[-1] != '\n':
498 499 fp.write('\n')
499 500 for name in names:
500 501 m = munge and munge(name) or name
501 502 if (self._tagscache.tagtypes and
502 503 name in self._tagscache.tagtypes):
503 504 old = self.tags().get(name, nullid)
504 505 fp.write('%s %s\n' % (hex(old), m))
505 506 fp.write('%s %s\n' % (hex(node), m))
506 507 fp.close()
507 508
508 509 prevtags = ''
509 510 if local:
510 511 try:
511 512 fp = self.opener('localtags', 'r+')
512 513 except IOError:
513 514 fp = self.opener('localtags', 'a')
514 515 else:
515 516 prevtags = fp.read()
516 517
517 518 # local tags are stored in the current charset
518 519 writetags(fp, names, None, prevtags)
519 520 for name in names:
520 521 self.hook('tag', node=hex(node), tag=name, local=local)
521 522 return
522 523
523 524 try:
524 525 fp = self.wfile('.hgtags', 'rb+')
525 526 except IOError, e:
526 527 if e.errno != errno.ENOENT:
527 528 raise
528 529 fp = self.wfile('.hgtags', 'ab')
529 530 else:
530 531 prevtags = fp.read()
531 532
532 533 # committed tags are stored in UTF-8
533 534 writetags(fp, names, encoding.fromlocal, prevtags)
534 535
535 536 fp.close()
536 537
537 538 self.invalidatecaches()
538 539
539 540 if '.hgtags' not in self.dirstate:
540 541 self[None].add(['.hgtags'])
541 542
542 543 m = matchmod.exact(self.root, '', ['.hgtags'])
543 544 tagnode = self.commit(message, user, date, extra=extra, match=m,
544 545 editor=editor)
545 546
546 547 for name in names:
547 548 self.hook('tag', node=hex(node), tag=name, local=local)
548 549
549 550 return tagnode
550 551
551 552 def tag(self, names, node, message, local, user, date, editor=False):
552 553 '''tag a revision with one or more symbolic names.
553 554
554 555 names is a list of strings or, when adding a single tag, names may be a
555 556 string.
556 557
557 558 if local is True, the tags are stored in a per-repository file.
558 559 otherwise, they are stored in the .hgtags file, and a new
559 560 changeset is committed with the change.
560 561
561 562 keyword arguments:
562 563
563 564 local: whether to store tags in non-version-controlled file
564 565 (default False)
565 566
566 567 message: commit message to use if committing
567 568
568 569 user: name of user to use if committing
569 570
570 571 date: date tuple to use if committing'''
571 572
572 573 if not local:
573 574 for x in self.status()[:5]:
574 575 if '.hgtags' in x:
575 576 raise util.Abort(_('working copy of .hgtags is changed '
576 577 '(please commit .hgtags manually)'))
577 578
578 579 self.tags() # instantiate the cache
579 580 self._tag(names, node, message, local, user, date, editor=editor)
580 581
581 582 @filteredpropertycache
582 583 def _tagscache(self):
583 584 '''Returns a tagscache object that contains various tags related
584 585 caches.'''
585 586
586 587 # This simplifies its cache management by having one decorated
587 588 # function (this one) and the rest simply fetch things from it.
588 589 class tagscache(object):
589 590 def __init__(self):
590 591 # These two define the set of tags for this repository. tags
591 592 # maps tag name to node; tagtypes maps tag name to 'global' or
592 593 # 'local'. (Global tags are defined by .hgtags across all
593 594 # heads, and local tags are defined in .hg/localtags.)
594 595 # They constitute the in-memory cache of tags.
595 596 self.tags = self.tagtypes = None
596 597
597 598 self.nodetagscache = self.tagslist = None
598 599
599 600 cache = tagscache()
600 601 cache.tags, cache.tagtypes = self._findtags()
601 602
602 603 return cache
603 604
604 605 def tags(self):
605 606 '''return a mapping of tag to node'''
606 607 t = {}
607 608 if self.changelog.filteredrevs:
608 609 tags, tt = self._findtags()
609 610 else:
610 611 tags = self._tagscache.tags
611 612 for k, v in tags.iteritems():
612 613 try:
613 614 # ignore tags to unknown nodes
614 615 self.changelog.rev(v)
615 616 t[k] = v
616 617 except (error.LookupError, ValueError):
617 618 pass
618 619 return t
619 620
620 621 def _findtags(self):
621 622 '''Do the hard work of finding tags. Return a pair of dicts
622 623 (tags, tagtypes) where tags maps tag name to node, and tagtypes
623 624 maps tag name to a string like \'global\' or \'local\'.
624 625 Subclasses or extensions are free to add their own tags, but
625 626 should be aware that the returned dicts will be retained for the
626 627 duration of the localrepo object.'''
627 628
628 629 # XXX what tagtype should subclasses/extensions use? Currently
629 630 # mq and bookmarks add tags, but do not set the tagtype at all.
630 631 # Should each extension invent its own tag type? Should there
631 632 # be one tagtype for all such "virtual" tags? Or is the status
632 633 # quo fine?
633 634
634 635 alltags = {} # map tag name to (node, hist)
635 636 tagtypes = {}
636 637
637 638 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
638 639 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
639 640
640 641 # Build the return dicts. Have to re-encode tag names because
641 642 # the tags module always uses UTF-8 (in order not to lose info
642 643 # writing to the cache), but the rest of Mercurial wants them in
643 644 # local encoding.
644 645 tags = {}
645 646 for (name, (node, hist)) in alltags.iteritems():
646 647 if node != nullid:
647 648 tags[encoding.tolocal(name)] = node
648 649 tags['tip'] = self.changelog.tip()
649 650 tagtypes = dict([(encoding.tolocal(name), value)
650 651 for (name, value) in tagtypes.iteritems()])
651 652 return (tags, tagtypes)
652 653
653 654 def tagtype(self, tagname):
654 655 '''
655 656 return the type of the given tag. result can be:
656 657
657 658 'local' : a local tag
658 659 'global' : a global tag
659 660 None : tag does not exist
660 661 '''
661 662
662 663 return self._tagscache.tagtypes.get(tagname)
663 664
664 665 def tagslist(self):
665 666 '''return a list of tags ordered by revision'''
666 667 if not self._tagscache.tagslist:
667 668 l = []
668 669 for t, n in self.tags().iteritems():
669 670 r = self.changelog.rev(n)
670 671 l.append((r, t, n))
671 672 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
672 673
673 674 return self._tagscache.tagslist
674 675
675 676 def nodetags(self, node):
676 677 '''return the tags associated with a node'''
677 678 if not self._tagscache.nodetagscache:
678 679 nodetagscache = {}
679 680 for t, n in self._tagscache.tags.iteritems():
680 681 nodetagscache.setdefault(n, []).append(t)
681 682 for tags in nodetagscache.itervalues():
682 683 tags.sort()
683 684 self._tagscache.nodetagscache = nodetagscache
684 685 return self._tagscache.nodetagscache.get(node, [])
685 686
686 687 def nodebookmarks(self, node):
687 688 marks = []
688 689 for bookmark, n in self._bookmarks.iteritems():
689 690 if n == node:
690 691 marks.append(bookmark)
691 692 return sorted(marks)
692 693
693 694 def branchmap(self):
694 695 '''returns a dictionary {branch: [branchheads]} with branchheads
695 696 ordered by increasing revision number'''
696 697 branchmap.updatecache(self)
697 698 return self._branchcaches[self.filtername]
698 699
699 700 def branchtip(self, branch):
700 701 '''return the tip node for a given branch'''
701 702 try:
702 703 return self.branchmap().branchtip(branch)
703 704 except KeyError:
704 705 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
705 706
706 707 def lookup(self, key):
707 708 return self[key].node()
708 709
709 710 def lookupbranch(self, key, remote=None):
710 711 repo = remote or self
711 712 if key in repo.branchmap():
712 713 return key
713 714
714 715 repo = (remote and remote.local()) and remote or self
715 716 return repo[key].branch()
716 717
717 718 def known(self, nodes):
718 719 nm = self.changelog.nodemap
719 720 pc = self._phasecache
720 721 result = []
721 722 for n in nodes:
722 723 r = nm.get(n)
723 724 resp = not (r is None or pc.phase(self, r) >= phases.secret)
724 725 result.append(resp)
725 726 return result
726 727
727 728 def local(self):
728 729 return self
729 730
730 731 def cancopy(self):
731 732 # so statichttprepo's override of local() works
732 733 if not self.local():
733 734 return False
734 735 if not self.ui.configbool('phases', 'publish', True):
735 736 return True
736 737 # if publishing we can't copy if there is filtered content
737 738 return not self.filtered('visible').changelog.filteredrevs
738 739
739 740 def join(self, f):
740 741 return os.path.join(self.path, f)
741 742
742 743 def wjoin(self, f):
743 744 return os.path.join(self.root, f)
744 745
745 746 def file(self, f):
746 747 if f[0] == '/':
747 748 f = f[1:]
748 749 return filelog.filelog(self.sopener, f)
749 750
750 751 def changectx(self, changeid):
751 752 return self[changeid]
752 753
753 754 def parents(self, changeid=None):
754 755 '''get list of changectxs for parents of changeid'''
755 756 return self[changeid].parents()
756 757
757 758 def setparents(self, p1, p2=nullid):
758 759 copies = self.dirstate.setparents(p1, p2)
759 760 pctx = self[p1]
760 761 if copies:
761 762 # Adjust copy records; the dirstate cannot do it, as it
762 763 # requires access to the parents' manifests. Preserve them
763 764 # only for entries added to the first parent.
764 765 for f in copies:
765 766 if f not in pctx and copies[f] in pctx:
766 767 self.dirstate.copy(copies[f], f)
767 768 if p2 == nullid:
768 769 for f, s in sorted(self.dirstate.copies().items()):
769 770 if f not in pctx and s not in pctx:
770 771 self.dirstate.copy(None, f)
771 772
772 773 def filectx(self, path, changeid=None, fileid=None):
773 774 """changeid can be a changeset revision, node, or tag.
774 775 fileid can be a file revision or node."""
775 776 return context.filectx(self, path, changeid, fileid)
776 777
777 778 def getcwd(self):
778 779 return self.dirstate.getcwd()
779 780
780 781 def pathto(self, f, cwd=None):
781 782 return self.dirstate.pathto(f, cwd)
782 783
783 784 def wfile(self, f, mode='r'):
784 785 return self.wopener(f, mode)
785 786
786 787 def _link(self, f):
787 788 return self.wvfs.islink(f)
788 789
789 790 def _loadfilter(self, filter):
790 791 if filter not in self.filterpats:
791 792 l = []
792 793 for pat, cmd in self.ui.configitems(filter):
793 794 if cmd == '!':
794 795 continue
795 796 mf = matchmod.match(self.root, '', [pat])
796 797 fn = None
797 798 params = cmd
798 799 for name, filterfn in self._datafilters.iteritems():
799 800 if cmd.startswith(name):
800 801 fn = filterfn
801 802 params = cmd[len(name):].lstrip()
802 803 break
803 804 if not fn:
804 805 fn = lambda s, c, **kwargs: util.filter(s, c)
805 806 # Wrap old filters not supporting keyword arguments
806 807 if not inspect.getargspec(fn)[2]:
807 808 oldfn = fn
808 809 fn = lambda s, c, **kwargs: oldfn(s, c)
809 810 l.append((mf, fn, params))
810 811 self.filterpats[filter] = l
811 812 return self.filterpats[filter]
812 813
813 814 def _filter(self, filterpats, filename, data):
814 815 for mf, fn, cmd in filterpats:
815 816 if mf(filename):
816 817 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
817 818 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
818 819 break
819 820
820 821 return data
821 822
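The filter machinery above maps config patterns to transform functions; the following is a minimal self-contained sketch of the dispatch in _filter (the pattern, transform, and file name are hypothetical, not part of this repo):

    import fnmatch

    # hypothetical (matcher, transform, params) triples, mirroring filterpats
    filterpats = [
        (lambda fname: fnmatch.fnmatch(fname, '*.txt'),
         lambda data, cmd, **kwargs: data.replace('\r\n', '\n'),
         'crlf-to-lf'),
    ]

    def applyfilters(filterpats, filename, data):
        # first matching pattern wins, as in _filter above
        for mf, fn, cmd in filterpats:
            if mf(filename):
                return fn(data, cmd)
        return data

    assert applyfilters(filterpats, 'notes.txt', 'a\r\nb\r\n') == 'a\nb\n'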
822 823 @unfilteredpropertycache
823 824 def _encodefilterpats(self):
824 825 return self._loadfilter('encode')
825 826
826 827 @unfilteredpropertycache
827 828 def _decodefilterpats(self):
828 829 return self._loadfilter('decode')
829 830
830 831 def adddatafilter(self, name, filter):
831 832 self._datafilters[name] = filter
832 833
833 834 def wread(self, filename):
834 835 if self._link(filename):
835 836 data = self.wvfs.readlink(filename)
836 837 else:
837 838 data = self.wopener.read(filename)
838 839 return self._filter(self._encodefilterpats, filename, data)
839 840
840 841 def wwrite(self, filename, data, flags):
841 842 data = self._filter(self._decodefilterpats, filename, data)
842 843 if 'l' in flags:
843 844 self.wopener.symlink(data, filename)
844 845 else:
845 846 self.wopener.write(filename, data)
846 847 if 'x' in flags:
847 848 self.wvfs.setflags(filename, False, True)
848 849
849 850 def wwritedata(self, filename, data):
850 851 return self._filter(self._decodefilterpats, filename, data)
851 852
852 853 def transaction(self, desc, report=None):
853 854 tr = self._transref and self._transref() or None
854 855 if tr and tr.running():
855 856 return tr.nest()
856 857
857 858 # abort here if the journal already exists
858 859 if self.svfs.exists("journal"):
859 860 raise error.RepoError(
860 861 _("abandoned transaction found"),
861 862 hint=_("run 'hg recover' to clean up transaction"))
862 863
863 864 def onclose():
864 865 self.store.write(tr)
865 866
866 867 self._writejournal(desc)
867 868 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
868 869 rp = report and report or self.ui.warn
869 870 tr = transaction.transaction(rp, self.sopener,
870 871 "journal",
871 872 aftertrans(renames),
872 873 self.store.createmode,
873 874 onclose)
874 875 self._transref = weakref.ref(tr)
875 876 return tr
876 877
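A sketch of the calling convention, matching how commitctx() and stream_in() below drive it: close() commits, and release() rolls back anything not closed.

    tr = None
    lock = repo.lock()           # transactions require the store lock
    try:
        tr = repo.transaction('example')   # hypothetical description
        # ... append to revlogs and other store files via tr ...
        tr.close()               # make the changes permanent
    finally:
        if tr:
            tr.release()         # no-op after close(); rollback otherwise
        lock.release()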
877 878 def _journalfiles(self):
878 879 return ((self.svfs, 'journal'),
879 880 (self.vfs, 'journal.dirstate'),
880 881 (self.vfs, 'journal.branch'),
881 882 (self.vfs, 'journal.desc'),
882 883 (self.vfs, 'journal.bookmarks'),
883 884 (self.svfs, 'journal.phaseroots'))
884 885
885 886 def undofiles(self):
886 887 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
887 888
888 889 def _writejournal(self, desc):
889 890 self.opener.write("journal.dirstate",
890 891 self.opener.tryread("dirstate"))
891 892 self.opener.write("journal.branch",
892 893 encoding.fromlocal(self.dirstate.branch()))
893 894 self.opener.write("journal.desc",
894 895 "%d\n%s\n" % (len(self), desc))
895 896 self.opener.write("journal.bookmarks",
896 897 self.opener.tryread("bookmarks"))
897 898 self.sopener.write("journal.phaseroots",
898 899 self.sopener.tryread("phaseroots"))
899 900
900 901 def recover(self):
901 902 lock = self.lock()
902 903 try:
903 904 if self.svfs.exists("journal"):
904 905 self.ui.status(_("rolling back interrupted transaction\n"))
905 906 transaction.rollback(self.sopener, "journal",
906 907 self.ui.warn)
907 908 self.invalidate()
908 909 return True
909 910 else:
910 911 self.ui.warn(_("no interrupted transaction available\n"))
911 912 return False
912 913 finally:
913 914 lock.release()
914 915
915 916 def rollback(self, dryrun=False, force=False):
916 917 wlock = lock = None
917 918 try:
918 919 wlock = self.wlock()
919 920 lock = self.lock()
920 921 if self.svfs.exists("undo"):
921 922 return self._rollback(dryrun, force)
922 923 else:
923 924 self.ui.warn(_("no rollback information available\n"))
924 925 return 1
925 926 finally:
926 927 release(lock, wlock)
927 928
928 929 @unfilteredmethod # Until we get smarter cache management
929 930 def _rollback(self, dryrun, force):
930 931 ui = self.ui
931 932 try:
932 933 args = self.opener.read('undo.desc').splitlines()
933 934 (oldlen, desc, detail) = (int(args[0]), args[1], None)
934 935 if len(args) >= 3:
935 936 detail = args[2]
936 937 oldtip = oldlen - 1
937 938
938 939 if detail and ui.verbose:
939 940 msg = (_('repository tip rolled back to revision %s'
940 941 ' (undo %s: %s)\n')
941 942 % (oldtip, desc, detail))
942 943 else:
943 944 msg = (_('repository tip rolled back to revision %s'
944 945 ' (undo %s)\n')
945 946 % (oldtip, desc))
946 947 except IOError:
947 948 msg = _('rolling back unknown transaction\n')
948 949 desc = None
949 950
950 951 if not force and self['.'] != self['tip'] and desc == 'commit':
951 952 raise util.Abort(
952 953 _('rollback of last commit while not checked out '
953 954 'may lose data'), hint=_('use -f to force'))
954 955
955 956 ui.status(msg)
956 957 if dryrun:
957 958 return 0
958 959
959 960 parents = self.dirstate.parents()
960 961 self.destroying()
961 962 transaction.rollback(self.sopener, 'undo', ui.warn)
962 963 if self.vfs.exists('undo.bookmarks'):
963 964 self.vfs.rename('undo.bookmarks', 'bookmarks')
964 965 if self.svfs.exists('undo.phaseroots'):
965 966 self.svfs.rename('undo.phaseroots', 'phaseroots')
966 967 self.invalidate()
967 968
968 969 parentgone = (parents[0] not in self.changelog.nodemap or
969 970 parents[1] not in self.changelog.nodemap)
970 971 if parentgone:
971 972 self.vfs.rename('undo.dirstate', 'dirstate')
972 973 try:
973 974 branch = self.opener.read('undo.branch')
974 975 self.dirstate.setbranch(encoding.tolocal(branch))
975 976 except IOError:
976 977 ui.warn(_('named branch could not be reset: '
977 978 'current branch is still \'%s\'\n')
978 979 % self.dirstate.branch())
979 980
980 981 self.dirstate.invalidate()
981 982 parents = tuple([p.rev() for p in self.parents()])
982 983 if len(parents) > 1:
983 984 ui.status(_('working directory now based on '
984 985 'revisions %d and %d\n') % parents)
985 986 else:
986 987 ui.status(_('working directory now based on '
987 988 'revision %d\n') % parents)
988 989 # TODO: if we know which new heads may result from this rollback, pass
989 990 # them to destroy(), which will prevent the branchhead cache from being
990 991 # invalidated.
991 992 self.destroyed()
992 993 return 0
993 994
994 995 def invalidatecaches(self):
995 996
996 997 if '_tagscache' in vars(self):
997 998 # can't use delattr on proxy
998 999 del self.__dict__['_tagscache']
999 1000
1000 1001 self.unfiltered()._branchcaches.clear()
1001 1002 self.invalidatevolatilesets()
1002 1003
1003 1004 def invalidatevolatilesets(self):
1004 1005 self.filteredrevcache.clear()
1005 1006 obsolete.clearobscaches(self)
1006 1007
1007 1008 def invalidatedirstate(self):
1008 1009 '''Invalidates the dirstate, causing the next call to dirstate
1009 1010 to check if it was modified since the last time it was read,
1010 1011 rereading it if it has.
1011 1012
1012 1013 This is different from dirstate.invalidate() in that it doesn't always
1013 1014 reread the dirstate. Use dirstate.invalidate() if you want to
1014 1015 explicitly read the dirstate again (i.e. restoring it to a previous
1015 1016 known good state).'''
1016 1017 if hasunfilteredcache(self, 'dirstate'):
1017 1018 for k in self.dirstate._filecache:
1018 1019 try:
1019 1020 delattr(self.dirstate, k)
1020 1021 except AttributeError:
1021 1022 pass
1022 1023 delattr(self.unfiltered(), 'dirstate')
1023 1024
1024 1025 def invalidate(self):
1025 1026 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1026 1027 for k in self._filecache:
1027 1028 # dirstate is invalidated separately in invalidatedirstate()
1028 1029 if k == 'dirstate':
1029 1030 continue
1030 1031
1031 1032 try:
1032 1033 delattr(unfiltered, k)
1033 1034 except AttributeError:
1034 1035 pass
1035 1036 self.invalidatecaches()
1036 1037 self.store.invalidatecaches()
1037 1038
1038 1039 def invalidateall(self):
1039 1040 '''Fully invalidates both store and non-store parts, causing the
1040 1041 subsequent operation to reread any outside changes.'''
1041 1042 # extension should hook this to invalidate its caches
1042 1043 self.invalidate()
1043 1044 self.invalidatedirstate()
1044 1045
1045 1046 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1046 1047 try:
1047 1048 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1048 1049 except error.LockHeld, inst:
1049 1050 if not wait:
1050 1051 raise
1051 1052 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1052 1053 (desc, inst.locker))
1053 1054 # default to 600 seconds timeout
1054 1055 l = lockmod.lock(vfs, lockname,
1055 1056 int(self.ui.config("ui", "timeout", "600")),
1056 1057 releasefn, desc=desc)
1057 1058 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1058 1059 if acquirefn:
1059 1060 acquirefn()
1060 1061 return l
1061 1062
1062 1063 def _afterlock(self, callback):
1063 1064 """add a callback to the current repository lock.
1064 1065
1065 1066 The callback will be executed on lock release."""
1066 1067 l = self._lockref and self._lockref()
1067 1068 if l:
1068 1069 l.postrelease.append(callback)
1069 1070 else:
1070 1071 callback()
1071 1072
1072 1073 def lock(self, wait=True):
1073 1074 '''Lock the repository store (.hg/store) and return a weak reference
1074 1075 to the lock. Use this before modifying the store (e.g. committing or
1075 1076 stripping). If you are opening a transaction, get a lock as well.'''
1076 1077 l = self._lockref and self._lockref()
1077 1078 if l is not None and l.held:
1078 1079 l.lock()
1079 1080 return l
1080 1081
1081 1082 def unlock():
1082 1083 if hasunfilteredcache(self, '_phasecache'):
1083 1084 self._phasecache.write()
1084 1085 for k, ce in self._filecache.items():
1085 1086 if k == 'dirstate' or k not in self.__dict__:
1086 1087 continue
1087 1088 ce.refresh()
1088 1089
1089 1090 l = self._lock(self.svfs, "lock", wait, unlock,
1090 1091 self.invalidate, _('repository %s') % self.origroot)
1091 1092 self._lockref = weakref.ref(l)
1092 1093 return l
1093 1094
1094 1095 def wlock(self, wait=True):
1095 1096 '''Lock the non-store parts of the repository (everything under
1096 1097 .hg except .hg/store) and return a weak reference to the lock.
1097 1098 Use this before modifying files in .hg.'''
1098 1099 l = self._wlockref and self._wlockref()
1099 1100 if l is not None and l.held:
1100 1101 l.lock()
1101 1102 return l
1102 1103
1103 1104 def unlock():
1104 1105 self.dirstate.write()
1105 1106 self._filecache['dirstate'].refresh()
1106 1107
1107 1108 l = self._lock(self.vfs, "wlock", wait, unlock,
1108 1109 self.invalidatedirstate, _('working directory of %s') %
1109 1110 self.origroot)
1110 1111 self._wlockref = weakref.ref(l)
1111 1112 return l
1112 1113
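When both locks are needed, wlock is taken before lock, as rollback() above does; a sketch using the same release() helper:

    from mercurial.lock import release   # release(*locks) frees in order given

    wlock = lock = None
    try:
        wlock = repo.wlock()     # non-store parts first
        lock = repo.lock()       # then the store
        # ... modify the dirstate and the store ...
    finally:
        release(lock, wlock)     # store lock freed before the wlock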
1113 1114 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1114 1115 """
1115 1116 commit an individual file as part of a larger transaction
1116 1117 """
1117 1118
1118 1119 fname = fctx.path()
1119 1120 text = fctx.data()
1120 1121 flog = self.file(fname)
1121 1122 fparent1 = manifest1.get(fname, nullid)
1122 1123 fparent2 = fparent2o = manifest2.get(fname, nullid)
1123 1124
1124 1125 meta = {}
1125 1126 copy = fctx.renamed()
1126 1127 if copy and copy[0] != fname:
1127 1128 # Mark the new revision of this file as a copy of another
1128 1129 # file. This copy data will effectively act as a parent
1129 1130 # of this new revision. If this is a merge, the first
1130 1131 # parent will be the nullid (meaning "look up the copy data")
1131 1132 # and the second one will be the other parent. For example:
1132 1133 #
1133 1134 # 0 --- 1 --- 3 rev1 changes file foo
1134 1135 # \ / rev2 renames foo to bar and changes it
1135 1136 # \- 2 -/ rev3 should have bar with all changes and
1136 1137 # should record that bar descends from
1137 1138 # bar in rev2 and foo in rev1
1138 1139 #
1139 1140 # this allows this merge to succeed:
1140 1141 #
1141 1142 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1142 1143 # \ / merging rev3 and rev4 should use bar@rev2
1143 1144 # \- 2 --- 4 as the merge base
1144 1145 #
1145 1146
1146 1147 cfname = copy[0]
1147 1148 crev = manifest1.get(cfname)
1148 1149 newfparent = fparent2
1149 1150
1150 1151 if manifest2: # branch merge
1151 1152 if fparent2 == nullid or crev is None: # copied on remote side
1152 1153 if cfname in manifest2:
1153 1154 crev = manifest2[cfname]
1154 1155 newfparent = fparent1
1155 1156
1156 1157 # find source in nearest ancestor if we've lost track
1157 1158 if not crev:
1158 1159 self.ui.debug(" %s: searching for copy revision for %s\n" %
1159 1160 (fname, cfname))
1160 1161 for ancestor in self[None].ancestors():
1161 1162 if cfname in ancestor:
1162 1163 crev = ancestor[cfname].filenode()
1163 1164 break
1164 1165
1165 1166 if crev:
1166 1167 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1167 1168 meta["copy"] = cfname
1168 1169 meta["copyrev"] = hex(crev)
1169 1170 fparent1, fparent2 = nullid, newfparent
1170 1171 else:
1171 1172 self.ui.warn(_("warning: can't find ancestor for '%s' "
1172 1173 "copied from '%s'!\n") % (fname, cfname))
1173 1174
1174 1175 elif fparent1 == nullid:
1175 1176 fparent1, fparent2 = fparent2, nullid
1176 1177 elif fparent2 != nullid:
1177 1178 # is one parent an ancestor of the other?
1178 1179 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1179 1180 if fparent1 in fparentancestors:
1180 1181 fparent1, fparent2 = fparent2, nullid
1181 1182 elif fparent2 in fparentancestors:
1182 1183 fparent2 = nullid
1183 1184
1184 1185 # is the file changed?
1185 1186 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1186 1187 changelist.append(fname)
1187 1188 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1188 1189
1189 1190 # are just the flags changed during merge?
1190 1191 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1191 1192 changelist.append(fname)
1192 1193
1193 1194 return fparent1
1194 1195
1195 1196 @unfilteredmethod
1196 1197 def commit(self, text="", user=None, date=None, match=None, force=False,
1197 1198 editor=False, extra={}):
1198 1199 """Add a new revision to current repository.
1199 1200
1200 1201 Revision information is gathered from the working directory,
1201 1202 match can be used to filter the committed files. If editor is
1202 1203 supplied, it is called to get a commit message.
1203 1204 """
1204 1205
1205 1206 def fail(f, msg):
1206 1207 raise util.Abort('%s: %s' % (f, msg))
1207 1208
1208 1209 if not match:
1209 1210 match = matchmod.always(self.root, '')
1210 1211
1211 1212 if not force:
1212 1213 vdirs = []
1213 1214 match.explicitdir = vdirs.append
1214 1215 match.bad = fail
1215 1216
1216 1217 wlock = self.wlock()
1217 1218 try:
1218 1219 wctx = self[None]
1219 1220 merge = len(wctx.parents()) > 1
1220 1221
1221 1222 if (not force and merge and match and
1222 1223 (match.files() or match.anypats())):
1223 1224 raise util.Abort(_('cannot partially commit a merge '
1224 1225 '(do not specify files or patterns)'))
1225 1226
1226 1227 changes = self.status(match=match, clean=force)
1227 1228 if force:
1228 1229 changes[0].extend(changes[6]) # mq may commit unchanged files
1229 1230
1230 1231 # check subrepos
1231 1232 subs = []
1232 1233 commitsubs = set()
1233 1234 newstate = wctx.substate.copy()
1234 1235 # only manage subrepos and .hgsubstate if .hgsub is present
1235 1236 if '.hgsub' in wctx:
1236 1237 # we'll decide whether to track this ourselves, thanks
1237 1238 for c in changes[:3]:
1238 1239 if '.hgsubstate' in c:
1239 1240 c.remove('.hgsubstate')
1240 1241
1241 1242 # compare current state to last committed state
1242 1243 # build new substate based on last committed state
1243 1244 oldstate = wctx.p1().substate
1244 1245 for s in sorted(newstate.keys()):
1245 1246 if not match(s):
1246 1247 # ignore working copy, use old state if present
1247 1248 if s in oldstate:
1248 1249 newstate[s] = oldstate[s]
1249 1250 continue
1250 1251 if not force:
1251 1252 raise util.Abort(
1252 1253 _("commit with new subrepo %s excluded") % s)
1253 1254 if wctx.sub(s).dirty(True):
1254 1255 if not self.ui.configbool('ui', 'commitsubrepos'):
1255 1256 raise util.Abort(
1256 1257 _("uncommitted changes in subrepo %s") % s,
1257 1258 hint=_("use --subrepos for recursive commit"))
1258 1259 subs.append(s)
1259 1260 commitsubs.add(s)
1260 1261 else:
1261 1262 bs = wctx.sub(s).basestate()
1262 1263 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1263 1264 if oldstate.get(s, (None, None, None))[1] != bs:
1264 1265 subs.append(s)
1265 1266
1266 1267 # check for removed subrepos
1267 1268 for p in wctx.parents():
1268 1269 r = [s for s in p.substate if s not in newstate]
1269 1270 subs += [s for s in r if match(s)]
1270 1271 if subs:
1271 1272 if (not match('.hgsub') and
1272 1273 '.hgsub' in (wctx.modified() + wctx.added())):
1273 1274 raise util.Abort(
1274 1275 _("can't commit subrepos without .hgsub"))
1275 1276 changes[0].insert(0, '.hgsubstate')
1276 1277
1277 1278 elif '.hgsub' in changes[2]:
1278 1279 # clean up .hgsubstate when .hgsub is removed
1279 1280 if ('.hgsubstate' in wctx and
1280 1281 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1281 1282 changes[2].insert(0, '.hgsubstate')
1282 1283
1283 1284 # make sure all explicit patterns are matched
1284 1285 if not force and match.files():
1285 1286 matched = set(changes[0] + changes[1] + changes[2])
1286 1287
1287 1288 for f in match.files():
1288 1289 f = self.dirstate.normalize(f)
1289 1290 if f == '.' or f in matched or f in wctx.substate:
1290 1291 continue
1291 1292 if f in changes[3]: # missing
1292 1293 fail(f, _('file not found!'))
1293 1294 if f in vdirs: # visited directory
1294 1295 d = f + '/'
1295 1296 for mf in matched:
1296 1297 if mf.startswith(d):
1297 1298 break
1298 1299 else:
1299 1300 fail(f, _("no match under directory!"))
1300 1301 elif f not in self.dirstate:
1301 1302 fail(f, _("file not tracked!"))
1302 1303
1303 1304 cctx = context.workingctx(self, text, user, date, extra, changes)
1304 1305
1305 1306 if (not force and not extra.get("close") and not merge
1306 1307 and not cctx.files()
1307 1308 and wctx.branch() == wctx.p1().branch()):
1308 1309 return None
1309 1310
1310 1311 if merge and cctx.deleted():
1311 1312 raise util.Abort(_("cannot commit merge with missing files"))
1312 1313
1313 1314 ms = mergemod.mergestate(self)
1314 1315 for f in changes[0]:
1315 1316 if f in ms and ms[f] == 'u':
1316 1317 raise util.Abort(_("unresolved merge conflicts "
1317 1318 "(see hg help resolve)"))
1318 1319
1319 1320 if editor:
1320 1321 cctx._text = editor(self, cctx, subs)
1321 1322 edited = (text != cctx._text)
1322 1323
1323 1324 # Save commit message in case this transaction gets rolled back
1324 1325 # (e.g. by a pretxncommit hook). Leave the content alone on
1325 1326 # the assumption that the user will use the same editor again.
1326 1327 msgfn = self.savecommitmessage(cctx._text)
1327 1328
1328 1329 # commit subs and write new state
1329 1330 if subs:
1330 1331 for s in sorted(commitsubs):
1331 1332 sub = wctx.sub(s)
1332 1333 self.ui.status(_('committing subrepository %s\n') %
1333 1334 subrepo.subrelpath(sub))
1334 1335 sr = sub.commit(cctx._text, user, date)
1335 1336 newstate[s] = (newstate[s][0], sr)
1336 1337 subrepo.writestate(self, newstate)
1337 1338
1338 1339 p1, p2 = self.dirstate.parents()
1339 1340 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1340 1341 try:
1341 1342 self.hook("precommit", throw=True, parent1=hookp1,
1342 1343 parent2=hookp2)
1343 1344 ret = self.commitctx(cctx, True)
1344 1345 except: # re-raises
1345 1346 if edited:
1346 1347 self.ui.write(
1347 1348 _('note: commit message saved in %s\n') % msgfn)
1348 1349 raise
1349 1350
1350 1351 # update bookmarks, dirstate and mergestate
1351 1352 bookmarks.update(self, [p1, p2], ret)
1352 1353 cctx.markcommitted(ret)
1353 1354 ms.reset()
1354 1355 finally:
1355 1356 wlock.release()
1356 1357
1357 1358 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1358 1359 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1359 1360 self._afterlock(commithook)
1360 1361 return ret
1361 1362
1362 1363 @unfilteredmethod
1363 1364 def commitctx(self, ctx, error=False):
1364 1365 """Add a new revision to current repository.
1365 1366 Revision information is passed via the context argument.
1366 1367 """
1367 1368
1368 1369 tr = lock = None
1369 1370 removed = list(ctx.removed())
1370 1371 p1, p2 = ctx.p1(), ctx.p2()
1371 1372 user = ctx.user()
1372 1373
1373 1374 lock = self.lock()
1374 1375 try:
1375 1376 tr = self.transaction("commit")
1376 1377 trp = weakref.proxy(tr)
1377 1378
1378 1379 if ctx.files():
1379 1380 m1 = p1.manifest().copy()
1380 1381 m2 = p2.manifest()
1381 1382
1382 1383 # check in files
1383 1384 new = {}
1384 1385 changed = []
1385 1386 linkrev = len(self)
1386 1387 for f in sorted(ctx.modified() + ctx.added()):
1387 1388 self.ui.note(f + "\n")
1388 1389 try:
1389 1390 fctx = ctx[f]
1390 1391 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1391 1392 changed)
1392 1393 m1.set(f, fctx.flags())
1393 1394 except OSError, inst:
1394 1395 self.ui.warn(_("trouble committing %s!\n") % f)
1395 1396 raise
1396 1397 except IOError, inst:
1397 1398 errcode = getattr(inst, 'errno', errno.ENOENT)
1398 1399 if error or errcode and errcode != errno.ENOENT:
1399 1400 self.ui.warn(_("trouble committing %s!\n") % f)
1400 1401 raise
1401 1402 else:
1402 1403 removed.append(f)
1403 1404
1404 1405 # update manifest
1405 1406 m1.update(new)
1406 1407 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1407 1408 drop = [f for f in removed if f in m1]
1408 1409 for f in drop:
1409 1410 del m1[f]
1410 1411 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1411 1412 p2.manifestnode(), (new, drop))
1412 1413 files = changed + removed
1413 1414 else:
1414 1415 mn = p1.manifestnode()
1415 1416 files = []
1416 1417
1417 1418 # update changelog
1418 1419 self.changelog.delayupdate()
1419 1420 n = self.changelog.add(mn, files, ctx.description(),
1420 1421 trp, p1.node(), p2.node(),
1421 1422 user, ctx.date(), ctx.extra().copy())
1422 1423 p = lambda: self.changelog.writepending() and self.root or ""
1423 1424 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1424 1425 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1425 1426 parent2=xp2, pending=p)
1426 1427 self.changelog.finalize(trp)
1427 1428 # set the new commit in its proper phase
1428 1429 targetphase = subrepo.newcommitphase(self.ui, ctx)
1429 1430 if targetphase:
1430 1431 # retracting the boundary does not alter parent changesets;
1431 1432 # if a parent has a higher phase, the resulting phase will
1432 1433 # be compliant anyway
1433 1434 #
1434 1435 # if the minimal phase was 0 we don't need to retract anything
1435 1436 phases.retractboundary(self, targetphase, [n])
1436 1437 tr.close()
1437 1438 branchmap.updatecache(self.filtered('served'))
1438 1439 return n
1439 1440 finally:
1440 1441 if tr:
1441 1442 tr.release()
1442 1443 lock.release()
1443 1444
1444 1445 @unfilteredmethod
1445 1446 def destroying(self):
1446 1447 '''Inform the repository that nodes are about to be destroyed.
1447 1448 Intended for use by strip and rollback, so there's a common
1448 1449 place for anything that has to be done before destroying history.
1449 1450
1450 1451 This is mostly useful for saving state that is in memory and waiting
1451 1452 to be flushed when the current lock is released. Because a call to
1452 1453 destroyed is imminent, the repo will be invalidated, causing those
1453 1454 changes to either stay in memory (waiting for the next unlock) or
1454 1455 vanish completely.
1455 1456 '''
1456 1457 # When using the same lock to commit and strip, the phasecache is left
1457 1458 # dirty after committing. Then when we strip, the repo is invalidated,
1458 1459 # causing those changes to disappear.
1459 1460 if '_phasecache' in vars(self):
1460 1461 self._phasecache.write()
1461 1462
1462 1463 @unfilteredmethod
1463 1464 def destroyed(self):
1464 1465 '''Inform the repository that nodes have been destroyed.
1465 1466 Intended for use by strip and rollback, so there's a common
1466 1467 place for anything that has to be done after destroying history.
1467 1468 '''
1468 1469 # When one tries to:
1469 1470 # 1) destroy nodes thus calling this method (e.g. strip)
1470 1471 # 2) use phasecache somewhere (e.g. commit)
1471 1472 #
1472 1473 # then 2) will fail because the phasecache contains nodes that were
1473 1474 # removed. We can either remove phasecache from the filecache,
1474 1475 # causing it to reload next time it is accessed, or simply filter
1475 1476 # the removed nodes now and write the updated cache.
1476 1477 self._phasecache.filterunknown(self)
1477 1478 self._phasecache.write()
1478 1479
1479 1480 # update the 'served' branch cache to help read-only server processes
1480 1481 # Thanks to branchcache collaboration this is done from the nearest
1481 1482 # filtered subset and it is expected to be fast.
1482 1483 branchmap.updatecache(self.filtered('served'))
1483 1484
1484 1485 # Ensure the persistent tag cache is updated. Doing it now
1485 1486 # means that the tag cache only has to worry about destroyed
1486 1487 # heads immediately after a strip/rollback. That in turn
1487 1488 # guarantees that "cachetip == currenttip" (comparing both rev
1488 1489 # and node) always means no nodes have been added or destroyed.
1489 1490
1490 1491 # XXX this is suboptimal when qrefresh'ing: we strip the current
1491 1492 # head, refresh the tag cache, then immediately add a new head.
1492 1493 # But I think doing it this way is necessary for the "instant
1493 1494 # tag cache retrieval" case to work.
1494 1495 self.invalidate()
1495 1496
1496 1497 def walk(self, match, node=None):
1497 1498 '''
1498 1499 walk recursively through the directory tree or a given
1499 1500 changeset, finding all files matched by the match
1500 1501 function
1501 1502 '''
1502 1503 return self[node].walk(match)
1503 1504
1504 1505 def status(self, node1='.', node2=None, match=None,
1505 1506 ignored=False, clean=False, unknown=False,
1506 1507 listsubrepos=False):
1507 1508 '''a convenience method that calls node1.status(node2)'''
1508 1509 return self[node1].status(node2, match, ignored, clean, unknown,
1509 1510 listsubrepos)
1510 1511
1511 1512 def heads(self, start=None):
1512 1513 heads = self.changelog.heads(start)
1513 1514 # sort the output in rev descending order
1514 1515 return sorted(heads, key=self.changelog.rev, reverse=True)
1515 1516
1516 1517 def branchheads(self, branch=None, start=None, closed=False):
1517 1518 '''return a (possibly filtered) list of heads for the given branch
1518 1519
1519 1520 Heads are returned in topological order, from newest to oldest.
1520 1521 If branch is None, use the dirstate branch.
1521 1522 If start is not None, return only heads reachable from start.
1522 1523 If closed is True, return heads that are marked as closed as well.
1523 1524 '''
1524 1525 if branch is None:
1525 1526 branch = self[None].branch()
1526 1527 branches = self.branchmap()
1527 1528 if branch not in branches:
1528 1529 return []
1529 1530 # the cache returns heads ordered lowest to highest
1530 1531 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1531 1532 if start is not None:
1532 1533 # filter out the heads that cannot be reached from startrev
1533 1534 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1534 1535 bheads = [h for h in bheads if h in fbheads]
1535 1536 return bheads
1536 1537
1537 1538 def branches(self, nodes):
1538 1539 if not nodes:
1539 1540 nodes = [self.changelog.tip()]
1540 1541 b = []
1541 1542 for n in nodes:
1542 1543 t = n
1543 1544 while True:
1544 1545 p = self.changelog.parents(n)
1545 1546 if p[1] != nullid or p[0] == nullid:
1546 1547 b.append((t, n, p[0], p[1]))
1547 1548 break
1548 1549 n = p[0]
1549 1550 return b
1550 1551
1551 1552 def between(self, pairs):
1552 1553 r = []
1553 1554
1554 1555 for top, bottom in pairs:
1555 1556 n, l, i = top, [], 0
1556 1557 f = 1
1557 1558
1558 1559 while n != bottom and n != nullid:
1559 1560 p = self.changelog.parents(n)[0]
1560 1561 if i == f:
1561 1562 l.append(n)
1562 1563 f = f * 2
1563 1564 n = p
1564 1565 i += 1
1565 1566
1566 1567 r.append(l)
1567 1568
1568 1569 return r
1569 1570
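between() keeps nodes at exponentially growing distances from each top (1, 2, 4, ...), which is what the old discovery protocol samples; a self-contained sketch over a linear history:

    def sample(top, bottom, parent):
        # keep nodes at distances 1, 2, 4, 8, ... below top, as above
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            p = parent(n)
            if i == f:
                l.append(n)
                f *= 2
            n = p
            i += 1
        return l

    # linear history 0 <- 1 <- ... <- 10, where parent(n) == n - 1
    assert sample(10, 0, lambda n: n - 1) == [9, 8, 6, 2]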
1570 1571 def pull(self, remote, heads=None, force=False):
1571 1572 return exchange.pull(self, remote, heads, force)
1572 1573
1573 1574 def checkpush(self, pushop):
1574 1575 """Extensions can override this function if additional checks have
1575 1576 to be performed before pushing, or call it if they override push
1576 1577 command.
1577 1578 """
1578 1579 pass
1579 1580
1580 1581 @unfilteredpropertycache
1581 1582 def prepushoutgoinghooks(self):
1582 1583 """Return util.hooks consists of "(repo, remote, outgoing)"
1583 1584 functions, which are called before pushing changesets.
1584 1585 """
1585 1586 return util.hooks()
1586 1587
1587 1588 def push(self, remote, force=False, revs=None, newbranch=False):
1588 1589 return exchange.push(self, remote, force, revs, newbranch)
1589 1590
1590 1591 def stream_in(self, remote, requirements):
1591 1592 lock = self.lock()
1592 1593 try:
1593 1594 # Save remote branchmap. We will use it later
1594 1595 # to speed up branchcache creation
1595 1596 rbranchmap = None
1596 1597 if remote.capable("branchmap"):
1597 1598 rbranchmap = remote.branchmap()
1598 1599
1599 1600 fp = remote.stream_out()
1600 1601 l = fp.readline()
1601 1602 try:
1602 1603 resp = int(l)
1603 1604 except ValueError:
1604 1605 raise error.ResponseError(
1605 1606 _('unexpected response from remote server:'), l)
1606 1607 if resp == 1:
1607 1608 raise util.Abort(_('operation forbidden by server'))
1608 1609 elif resp == 2:
1609 1610 raise util.Abort(_('locking the remote repository failed'))
1610 1611 elif resp != 0:
1611 1612 raise util.Abort(_('the server sent an unknown error code'))
1612 1613 self.ui.status(_('streaming all changes\n'))
1613 1614 l = fp.readline()
1614 1615 try:
1615 1616 total_files, total_bytes = map(int, l.split(' ', 1))
1616 1617 except (ValueError, TypeError):
1617 1618 raise error.ResponseError(
1618 1619 _('unexpected response from remote server:'), l)
1619 1620 self.ui.status(_('%d files to transfer, %s of data\n') %
1620 1621 (total_files, util.bytecount(total_bytes)))
1621 1622 handled_bytes = 0
1622 1623 self.ui.progress(_('clone'), 0, total=total_bytes)
1623 1624 start = time.time()
1624 1625
1625 1626 tr = self.transaction(_('clone'))
1626 1627 try:
1627 1628 for i in xrange(total_files):
1628 1629 # XXX doesn't support '\n' or '\r' in filenames
1629 1630 l = fp.readline()
1630 1631 try:
1631 1632 name, size = l.split('\0', 1)
1632 1633 size = int(size)
1633 1634 except (ValueError, TypeError):
1634 1635 raise error.ResponseError(
1635 1636 _('unexpected response from remote server:'), l)
1636 1637 if self.ui.debugflag:
1637 1638 self.ui.debug('adding %s (%s)\n' %
1638 1639 (name, util.bytecount(size)))
1639 1640 # for backwards compat, name was partially encoded
1640 1641 ofp = self.sopener(store.decodedir(name), 'w')
1641 1642 for chunk in util.filechunkiter(fp, limit=size):
1642 1643 handled_bytes += len(chunk)
1643 1644 self.ui.progress(_('clone'), handled_bytes,
1644 1645 total=total_bytes)
1645 1646 ofp.write(chunk)
1646 1647 ofp.close()
1647 1648 tr.close()
1648 1649 finally:
1649 1650 tr.release()
1650 1651
1651 1652 # Writing straight to files circumvented the inmemory caches
1652 1653 self.invalidate()
1653 1654
1654 1655 elapsed = time.time() - start
1655 1656 if elapsed <= 0:
1656 1657 elapsed = 0.001
1657 1658 self.ui.progress(_('clone'), None)
1658 1659 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1659 1660 (util.bytecount(total_bytes), elapsed,
1660 1661 util.bytecount(total_bytes / elapsed)))
1661 1662
1662 1663 # new requirements = old non-format requirements +
1663 1664 # new format-related
1664 1665 # requirements from the streamed-in repository
1665 1666 requirements.update(set(self.requirements) - self.supportedformats)
1666 1667 self._applyrequirements(requirements)
1667 1668 self._writerequirements()
1668 1669
1669 1670 if rbranchmap:
1670 1671 rbheads = []
1671 1672 for bheads in rbranchmap.itervalues():
1672 1673 rbheads.extend(bheads)
1673 1674
1674 1675 if rbheads:
1675 1676 rtiprev = max((int(self.changelog.rev(node))
1676 1677 for node in rbheads))
1677 1678 cache = branchmap.branchcache(rbranchmap,
1678 1679 self[rtiprev].node(),
1679 1680 rtiprev)
1680 1681 # Try to stick it as low as possible
1681 1682 # filters above 'served' are unlikely to be fetched by a clone
1682 1683 for candidate in ('base', 'immutable', 'served'):
1683 1684 rview = self.filtered(candidate)
1684 1685 if cache.validfor(rview):
1685 1686 self._branchcaches[candidate] = cache
1686 1687 cache.write(rview)
1687 1688 break
1688 1689 self.invalidate()
1689 1690 return len(self.heads()) + 1
1690 1691 finally:
1691 1692 lock.release()
1692 1693
1693 1694 def clone(self, remote, heads=[], stream=False):
1694 1695 '''clone remote repository.
1695 1696
1696 1697 keyword arguments:
1697 1698 heads: list of revs to clone (forces use of pull)
1698 1699 stream: use streaming clone if possible'''
1699 1700
1700 1701 # now, all clients that can request uncompressed clones can
1701 1702 # read repo formats supported by all servers that can serve
1702 1703 # them.
1703 1704
1704 1705 # if revlog format changes, client will have to check version
1705 1706 # and format flags on "stream" capability, and use
1706 1707 # uncompressed only if compatible.
1707 1708
1708 1709 if not stream:
1709 1710 # if the server explicitly prefers to stream (for fast LANs)
1710 1711 stream = remote.capable('stream-preferred')
1711 1712
1712 1713 if stream and not heads:
1713 1714 # 'stream' means remote revlog format is revlogv1 only
1714 1715 if remote.capable('stream'):
1715 1716 return self.stream_in(remote, set(('revlogv1',)))
1716 1717 # otherwise, 'streamreqs' contains the remote revlog format
1717 1718 streamreqs = remote.capable('streamreqs')
1718 1719 if streamreqs:
1719 1720 streamreqs = set(streamreqs.split(','))
1720 1721 # if we support it, stream in and adjust our requirements
1721 1722 if not streamreqs - self.supportedformats:
1722 1723 return self.stream_in(remote, streamreqs)
1723 1724 return self.pull(remote, heads)
1724 1725
1725 1726 def pushkey(self, namespace, key, old, new):
1726 1727 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1727 1728 old=old, new=new)
1728 1729 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1729 1730 ret = pushkey.push(self, namespace, key, old, new)
1730 1731 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1731 1732 ret=ret)
1732 1733 return ret
1733 1734
1734 1735 def listkeys(self, namespace):
1735 1736 self.hook('prelistkeys', throw=True, namespace=namespace)
1736 1737 self.ui.debug('listing keys for "%s"\n' % namespace)
1737 1738 values = pushkey.list(self, namespace)
1738 1739 self.hook('listkeys', namespace=namespace, values=values)
1739 1740 return values
1740 1741
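pushkey namespaces include 'bookmarks' and 'phases'; a hedged sketch of local use, assuming newhex is the 40-char hex of an existing changeset:

    marks = repo.listkeys('bookmarks')           # {bookmark name: hex node}
    ok = repo.pushkey('bookmarks',
                      'feature-x',               # hypothetical bookmark name
                      marks.get('feature-x', ''),  # old value; '' creates
                      newhex)                    # new target (assumption)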
1741 1742 def debugwireargs(self, one, two, three=None, four=None, five=None):
1742 1743 '''used to test argument passing over the wire'''
1743 1744 return "%s %s %s %s %s" % (one, two, three, four, five)
1744 1745
1745 1746 def savecommitmessage(self, text):
1746 1747 fp = self.opener('last-message.txt', 'wb')
1747 1748 try:
1748 1749 fp.write(text)
1749 1750 finally:
1750 1751 fp.close()
1751 1752 return self.pathto(fp.name[len(self.root) + 1:])
1752 1753
1753 1754 # used to avoid circular references so destructors work
1754 1755 def aftertrans(files):
1755 1756 renamefiles = [tuple(t) for t in files]
1756 1757 def a():
1757 1758 for vfs, src, dest in renamefiles:
1758 1759 try:
1759 1760 vfs.rename(src, dest)
1760 1761 except OSError: # journal file does not yet exist
1761 1762 pass
1762 1763 return a
1763 1764
1764 1765 def undoname(fn):
1765 1766 base, name = os.path.split(fn)
1766 1767 assert name.startswith('journal')
1767 1768 return os.path.join(base, name.replace('journal', 'undo', 1))
1768 1769
1769 1770 def instance(ui, path, create):
1770 1771 return localrepository(ui, util.urllocalpath(path), create)
1771 1772
1772 1773 def islocal(path):
1773 1774 return True
@@ -1,862 +1,863 b''
1 1 # wireproto.py - generic wire protocol support functions
2 2 #
3 3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import urllib, tempfile, os, sys
9 9 from i18n import _
10 10 from node import bin, hex
11 11 import changegroup as changegroupmod, bundle2, pushkey as pushkeymod
12 12 import peer, error, encoding, util, store, exchange
13 13
14 14
15 15 class abstractserverproto(object):
16 16 """abstract class that summarizes the protocol API
17 17
18 18 Used as reference and documentation.
19 19 """
20 20
21 21 def getargs(self, args):
22 22 """return the value for arguments in <args>
23 23
24 24 returns a list of values (same order as <args>)"""
25 25 raise NotImplementedError()
26 26
27 27 def getfile(self, fp):
28 28 """write the whole content of a file into a file like object
29 29
30 30 The file is in the form::
31 31
32 32 (<chunk-size>\n<chunk>)+0\n
33 33
34 34 The chunk size is the ASCII representation of the integer.
35 35 """
36 36 raise NotImplementedError()
37 37
38 38 def redirect(self):
39 39 """may setup interception for stdout and stderr
40 40
41 41 See also the `restore` method."""
42 42 raise NotImplementedError()
43 43
44 44 # If the `redirect` function does install interception, the `restore`
45 45 # function MUST be defined. If interception is not used, this function
46 46 # MUST NOT be defined.
47 47 #
48 48 # left commented here on purpose
49 49 #
50 50 #def restore(self):
51 51 # """reinstall previous stdout and stderr and return intercepted stdout
52 52 # """
53 53 # raise NotImplementedError()
54 54
55 55 def groupchunks(self, cg):
56 56 """return 4096 chunks from a changegroup object
57 57
58 58 Some protocols may have compressed the contents."""
59 59 raise NotImplementedError()
60 60
61 61 # abstract batching support
62 62
63 63 class future(object):
64 64 '''placeholder for a value to be set later'''
65 65 def set(self, value):
66 66 if util.safehasattr(self, 'value'):
67 67 raise error.RepoError("future is already set")
68 68 self.value = value
69 69
70 70 class batcher(object):
71 71 '''base class for batches of commands submittable in a single request
72 72
73 73 All methods invoked on instances of this class are simply queued and
74 74 return a future for the result. Once you call submit(), all the queued
75 75 calls are performed and the results set in their respective futures.
76 76 '''
77 77 def __init__(self):
78 78 self.calls = []
79 79 def __getattr__(self, name):
80 80 def call(*args, **opts):
81 81 resref = future()
82 82 self.calls.append((name, args, opts, resref,))
83 83 return resref
84 84 return call
85 85 def submit(self):
86 86 pass
87 87
88 88 class localbatch(batcher):
89 89 '''performs the queued calls directly'''
90 90 def __init__(self, local):
91 91 batcher.__init__(self)
92 92 self.local = local
93 93 def submit(self):
94 94 for name, args, opts, resref in self.calls:
95 95 resref.set(getattr(self.local, name)(*args, **opts))
96 96
97 97 class remotebatch(batcher):
98 98 '''batches the queued calls; uses as few roundtrips as possible'''
99 99 def __init__(self, remote):
100 100 '''remote must support _submitbatch(encbatch) and
101 101 _submitone(op, encargs)'''
102 102 batcher.__init__(self)
103 103 self.remote = remote
104 104 def submit(self):
105 105 req, rsp = [], []
106 106 for name, args, opts, resref in self.calls:
107 107 mtd = getattr(self.remote, name)
108 108 batchablefn = getattr(mtd, 'batchable', None)
109 109 if batchablefn is not None:
110 110 batchable = batchablefn(mtd.im_self, *args, **opts)
111 111 encargsorres, encresref = batchable.next()
112 112 if encresref:
113 113 req.append((name, encargsorres,))
114 114 rsp.append((batchable, encresref, resref,))
115 115 else:
116 116 resref.set(encargsorres)
117 117 else:
118 118 if req:
119 119 self._submitreq(req, rsp)
120 120 req, rsp = [], []
121 121 resref.set(mtd(*args, **opts))
122 122 if req:
123 123 self._submitreq(req, rsp)
124 124 def _submitreq(self, req, rsp):
125 125 encresults = self.remote._submitbatch(req)
126 126 for encres, r in zip(encresults, rsp):
127 127 batchable, encresref, resref = r
128 128 encresref.set(encres)
129 129 resref.set(batchable.next())
130 130
131 131 def batchable(f):
132 132 '''annotation for batchable methods
133 133
134 134 Such methods must implement a coroutine as follows:
135 135
136 136 @batchable
137 137 def sample(self, one, two=None):
138 138 # Handle locally computable results first:
139 139 if not one:
140 140 yield "a local result", None
141 141 # Build list of encoded arguments suitable for your wire protocol:
142 142 encargs = [('one', encode(one),), ('two', encode(two),)]
143 143 # Create future for injection of encoded result:
144 144 encresref = future()
145 145 # Return encoded arguments and future:
146 146 yield encargs, encresref
147 147 # Assuming the future to be filled with the result from the batched
148 148 # request now. Decode it:
149 149 yield decode(encresref.value)
150 150
151 151 The decorator returns a function which wraps this coroutine as a plain
152 152 method, but adds the original method as an attribute called "batchable",
153 153 which is used by remotebatch to split the call into separate encoding and
154 154 decoding phases.
155 155 '''
156 156 def plain(*args, **opts):
157 157 batchable = f(*args, **opts)
158 158 encargsorres, encresref = batchable.next()
159 159 if not encresref:
160 160 return encargsorres # a local result in this case
161 161 self = args[0]
162 162 encresref.set(self._submitone(f.func_name, encargsorres))
163 163 return batchable.next()
164 164 setattr(plain, 'batchable', f)
165 165 return plain
166 166
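Client code drives this through peer.batch(): queued calls return futures, and submit() fills in their .value attributes, using a single round trip for batchable commands. A sketch:

    b = remote.batch()           # remotebatch for wire peers
    fheads = b.heads()
    fknown = b.known(nodes)      # nodes: list of binary node ids (assumption)
    b.submit()                   # performs the queued calls
    heads = fheads.value
    flags = fknown.value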
167 167 # list of nodes encoding / decoding
168 168
169 169 def decodelist(l, sep=' '):
170 170 if l:
171 171 return map(bin, l.split(sep))
172 172 return []
173 173
174 174 def encodelist(l, sep=' '):
175 175 return sep.join(map(hex, l))
176 176
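hex and bin are the binascii conversions, so the wire form round-trips like this (equivalent standalone sketch):

    from binascii import hexlify, unhexlify

    nodes = ['\x00' * 20, '\xff' * 20]            # two binary node ids
    wire = ' '.join(hexlify(n) for n in nodes)    # what encodelist produces
    assert [unhexlify(s) for s in wire.split(' ')] == nodes  # decodelist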
177 177 # batched call argument encoding
178 178
179 179 def escapearg(plain):
180 180 return (plain
181 181 .replace(':', '::')
182 182 .replace(',', ':,')
183 183 .replace(';', ':;')
184 184 .replace('=', ':='))
185 185
186 186 def unescapearg(escaped):
187 187 return (escaped
188 188 .replace(':=', '=')
189 189 .replace(':;', ';')
190 190 .replace(':,', ',')
191 191 .replace('::', ':'))
192 192
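The escaping frees ',', ';' and '=' for use as batch delimiters while staying reversible; a quick round trip:

    raw = 'key=a,b;c:d'
    esc = escapearg(raw)            # 'key:=a:,b:;c::d'
    assert unescapearg(esc) == raw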
193 193 # mapping of options accepted by getbundle and their types
194 194 #
195 195 # Meant to be extended by extensions. It is the extension's responsibility to ensure
196 196 # such options are properly processed in exchange.getbundle.
197 197 #
198 198 # supported types are:
199 199 #
200 200 # :nodes: list of binary nodes
201 201 # :csv: list of comma-separated values
202 202 # :plain: string with no transformation needed.
203 203 gboptsmap = {'heads': 'nodes',
204 204 'common': 'nodes',
205 'bundlecaps': 'csv'}
205 'bundlecaps': 'csv',
206 'listkeys': 'csv'}
206 207
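This is the change the commit message describes: registering 'listkeys' as a csv option lets getbundle() below serialize a list of pushkey namespaces into one comma-separated argument. Sketched with hypothetical namespaces:

    value = ['bookmarks', 'phases']        # namespaces the client wants
    opts['listkeys'] = ','.join(value)     # 'bookmarks,phases' on the wire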
207 208 # client side
208 209
209 210 class wirepeer(peer.peerrepository):
210 211
211 212 def batch(self):
212 213 return remotebatch(self)
213 214 def _submitbatch(self, req):
214 215 cmds = []
215 216 for op, argsdict in req:
216 217 args = ','.join('%s=%s' % p for p in argsdict.iteritems())
217 218 cmds.append('%s %s' % (op, args))
218 219 rsp = self._call("batch", cmds=';'.join(cmds))
219 220 return rsp.split(';')
220 221 def _submitone(self, op, args):
221 222 return self._call(op, **args)
222 223
223 224 @batchable
224 225 def lookup(self, key):
225 226 self.requirecap('lookup', _('look up remote revision'))
226 227 f = future()
227 228 yield {'key': encoding.fromlocal(key)}, f
228 229 d = f.value
229 230 success, data = d[:-1].split(" ", 1)
230 231 if int(success):
231 232 yield bin(data)
232 233 self._abort(error.RepoError(data))
233 234
234 235 @batchable
235 236 def heads(self):
236 237 f = future()
237 238 yield {}, f
238 239 d = f.value
239 240 try:
240 241 yield decodelist(d[:-1])
241 242 except ValueError:
242 243 self._abort(error.ResponseError(_("unexpected response:"), d))
243 244
244 245 @batchable
245 246 def known(self, nodes):
246 247 f = future()
247 248 yield {'nodes': encodelist(nodes)}, f
248 249 d = f.value
249 250 try:
250 251 yield [bool(int(f)) for f in d]
251 252 except ValueError:
252 253 self._abort(error.ResponseError(_("unexpected response:"), d))
253 254
254 255 @batchable
255 256 def branchmap(self):
256 257 f = future()
257 258 yield {}, f
258 259 d = f.value
259 260 try:
260 261 branchmap = {}
261 262 for branchpart in d.splitlines():
262 263 branchname, branchheads = branchpart.split(' ', 1)
263 264 branchname = encoding.tolocal(urllib.unquote(branchname))
264 265 branchheads = decodelist(branchheads)
265 266 branchmap[branchname] = branchheads
266 267 yield branchmap
267 268 except TypeError:
268 269 self._abort(error.ResponseError(_("unexpected response:"), d))
269 270
270 271 def branches(self, nodes):
271 272 n = encodelist(nodes)
272 273 d = self._call("branches", nodes=n)
273 274 try:
274 275 br = [tuple(decodelist(b)) for b in d.splitlines()]
275 276 return br
276 277 except ValueError:
277 278 self._abort(error.ResponseError(_("unexpected response:"), d))
278 279
279 280 def between(self, pairs):
280 281 batch = 8 # avoid giant requests
281 282 r = []
282 283 for i in xrange(0, len(pairs), batch):
283 284 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
284 285 d = self._call("between", pairs=n)
285 286 try:
286 287 r.extend(l and decodelist(l) or [] for l in d.splitlines())
287 288 except ValueError:
288 289 self._abort(error.ResponseError(_("unexpected response:"), d))
289 290 return r
290 291
291 292 @batchable
292 293 def pushkey(self, namespace, key, old, new):
293 294 if not self.capable('pushkey'):
294 295 yield False, None
295 296 f = future()
296 297 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
297 298 yield {'namespace': encoding.fromlocal(namespace),
298 299 'key': encoding.fromlocal(key),
299 300 'old': encoding.fromlocal(old),
300 301 'new': encoding.fromlocal(new)}, f
301 302 d = f.value
302 303 d, output = d.split('\n', 1)
303 304 try:
304 305 d = bool(int(d))
305 306 except ValueError:
306 307 raise error.ResponseError(
307 308 _('push failed (unexpected response):'), d)
308 309 for l in output.splitlines(True):
309 310 self.ui.status(_('remote: '), l)
310 311 yield d
311 312
312 313 @batchable
313 314 def listkeys(self, namespace):
314 315 if not self.capable('pushkey'):
315 316 yield {}, None
316 317 f = future()
317 318 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
318 319 yield {'namespace': encoding.fromlocal(namespace)}, f
319 320 d = f.value
320 321 yield pushkeymod.decodekeys(d)
321 322
322 323 def stream_out(self):
323 324 return self._callstream('stream_out')
324 325
325 326 def changegroup(self, nodes, kind):
326 327 n = encodelist(nodes)
327 328 f = self._callcompressable("changegroup", roots=n)
328 329 return changegroupmod.unbundle10(f, 'UN')
329 330
330 331 def changegroupsubset(self, bases, heads, kind):
331 332 self.requirecap('changegroupsubset', _('look up remote changes'))
332 333 bases = encodelist(bases)
333 334 heads = encodelist(heads)
334 335 f = self._callcompressable("changegroupsubset",
335 336 bases=bases, heads=heads)
336 337 return changegroupmod.unbundle10(f, 'UN')
337 338
338 339 def getbundle(self, source, **kwargs):
339 340 self.requirecap('getbundle', _('look up remote changes'))
340 341 opts = {}
341 342 for key, value in kwargs.iteritems():
342 343 if value is None:
343 344 continue
344 345 keytype = gboptsmap.get(key)
345 346 if keytype is None:
346 347 assert False, 'unexpected'
347 348 elif keytype == 'nodes':
348 349 value = encodelist(value)
349 350 elif keytype == 'csv':
350 351 value = ','.join(value)
351 352 elif keytype != 'plain':
352 353 raise KeyError('unknown getbundle option type %s'
353 354 % keytype)
354 355 opts[key] = value
355 356 f = self._callcompressable("getbundle", **opts)
356 357 bundlecaps = kwargs.get('bundlecaps')
357 358 if bundlecaps is not None and 'HG2X' in bundlecaps:
358 359 return bundle2.unbundle20(self.ui, f)
359 360 else:
360 361 return changegroupmod.unbundle10(f, 'UN')
361 362
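A hedged example call, assuming heads and common are lists of binary nodes; with 'HG2X' in bundlecaps the reply is parsed as a bundle2 stream:

    bundle = remote.getbundle('pull',
                              heads=heads,              # 'nodes' -> encodelist
                              common=common,            # 'nodes' -> encodelist
                              bundlecaps=set(['HG2X']), # 'csv'   -> joined
                              listkeys=['phases'])      # new 'csv' option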
362 363 def unbundle(self, cg, heads, source):
363 364 '''Send cg (a readable file-like object representing the
364 365 changegroup to push, typically a chunkbuffer object) to the
365 366 remote server as a bundle.
366 367
367 368 When pushing a bundle10 stream, return an integer indicating the
368 369 result of the push (see localrepository.addchangegroup()).
369 370
370 371 When pushing a bundle20 stream, return a bundle20 stream.'''
371 372
372 373 if heads != ['force'] and self.capable('unbundlehash'):
373 374 heads = encodelist(['hashed',
374 375 util.sha1(''.join(sorted(heads))).digest()])
375 376 else:
376 377 heads = encodelist(heads)
377 378
378 379 if util.safehasattr(cg, 'deltaheader'):
379 380 # this is a bundle10; do the old style call sequence
380 381 ret, output = self._callpush("unbundle", cg, heads=heads)
381 382 if ret == "":
382 383 raise error.ResponseError(
383 384 _('push failed:'), output)
384 385 try:
385 386 ret = int(ret)
386 387 except ValueError:
387 388 raise error.ResponseError(
388 389 _('push failed (unexpected response):'), ret)
389 390
390 391 for l in output.splitlines(True):
391 392 self.ui.status(_('remote: '), l)
392 393 else:
393 394 # bundle2 push. Send a stream, fetch a stream.
394 395 stream = self._calltwowaystream('unbundle', cg, heads=heads)
395 396 ret = bundle2.unbundle20(self.ui, stream)
396 397 return ret
397 398
398 399 def debugwireargs(self, one, two, three=None, four=None, five=None):
399 400 # don't pass optional arguments left at their default value
400 401 opts = {}
401 402 if three is not None:
402 403 opts['three'] = three
403 404 if four is not None:
404 405 opts['four'] = four
405 406 return self._call('debugwireargs', one=one, two=two, **opts)
406 407
407 408 def _call(self, cmd, **args):
408 409 """execute <cmd> on the server
409 410
410 411 The command is expected to return a simple string.
411 412
412 413 returns the server reply as a string."""
413 414 raise NotImplementedError()
414 415
415 416 def _callstream(self, cmd, **args):
416 417 """execute <cmd> on the server
417 418
418 419 The command is expected to return a stream.
419 420
420 421 returns the server reply as a file like object."""
421 422 raise NotImplementedError()
422 423
423 424 def _callcompressable(self, cmd, **args):
424 425 """execute <cmd> on the server
425 426
426 427 The command is expected to return a stream.
427 428
428 429 The stream may have been compressed in some implementations. This
429 430 function takes care of the decompression. This is the only difference
430 431 from _callstream.
431 432
432 433 returns the server reply as a file like object.
433 434 """
434 435 raise NotImplementedError()
435 436
436 437 def _callpush(self, cmd, fp, **args):
437 438 """execute a <cmd> on server
438 439
439 440 The command is expected to be related to a push. Push has a special
440 441 return method.
441 442
442 443 returns the server reply as a (ret, output) tuple. ret is either
443 444 empty (error) or a stringified int.
444 445 """
445 446 raise NotImplementedError()
446 447
447 448 def _calltwowaystream(self, cmd, fp, **args):
448 449 """execute <cmd> on server
449 450
450 451 The command will send a stream to the server and get a stream in reply.
451 452 """
452 453 raise NotImplementedError()
453 454
454 455 def _abort(self, exception):
455 456 """clearly abort the wire protocol connection and raise the exception
456 457 """
457 458 raise NotImplementedError()
458 459
459 460 # server side
460 461
461 462 # wire protocol command can either return a string or one of these classes.
462 463 class streamres(object):
463 464 """wireproto reply: binary stream
464 465
465 466 The call was successful and the result is a stream.
466 467 Iterate on the `self.gen` attribute to retrieve chunks.
467 468 """
468 469 def __init__(self, gen):
469 470 self.gen = gen
470 471
471 472 class pushres(object):
472 473 """wireproto reply: success with simple integer return
473 474
474 475 The call was successful and returned an integer contained in `self.res`.
475 476 """
476 477 def __init__(self, res):
477 478 self.res = res
478 479
479 480 class pusherr(object):
480 481 """wireproto reply: failure
481 482
482 483 The call failed. The `self.res` attribute contains the error message.
483 484 """
484 485 def __init__(self, res):
485 486 self.res = res
486 487
487 488 class ooberror(object):
488 489 """wireproto reply: failure of a batch of operation
489 490
490 491 Something failed during a batch call. The error message is stored in
491 492 `self.message`.
492 493 """
493 494 def __init__(self, message):
494 495 self.message = message
495 496
496 497 def dispatch(repo, proto, command):
497 498 repo = repo.filtered("served")
498 499 func, spec = commands[command]
499 500 args = proto.getargs(spec)
500 501 return func(repo, proto, *args)
501 502
502 503 def options(cmd, keys, others):
503 504 opts = {}
504 505 for k in keys:
505 506 if k in others:
506 507 opts[k] = others[k]
507 508 del others[k]
508 509 if others:
509 510 sys.stderr.write("abort: %s got unexpected arguments %s\n"
510 511 % (cmd, ",".join(others)))
511 512 return opts
512 513
513 514 # list of commands
514 515 commands = {}
515 516
516 517 def wireprotocommand(name, args=''):
517 518 """decorator for wire protocol command"""
518 519 def register(func):
519 520 commands[name] = (func, args)
520 521 return func
521 522 return register
522 523
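An extension registers a new command by decorating a function whose positional arguments after (repo, proto) follow the spec string; a hypothetical example:

    @wireprotocommand('whoami', 'name')    # hypothetical command and spec
    def whoami(repo, proto, name):
        return 'hello %s\n' % name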
523 524 @wireprotocommand('batch', 'cmds *')
524 525 def batch(repo, proto, cmds, others):
525 526 repo = repo.filtered("served")
526 527 res = []
527 528 for pair in cmds.split(';'):
528 529 op, args = pair.split(' ', 1)
529 530 vals = {}
530 531 for a in args.split(','):
531 532 if a:
532 533 n, v = a.split('=')
533 534 vals[n] = unescapearg(v)
534 535 func, spec = commands[op]
535 536 if spec:
536 537 keys = spec.split()
537 538 data = {}
538 539 for k in keys:
539 540 if k == '*':
540 541 star = {}
541 542 for key in vals.keys():
542 543 if key not in keys:
543 544 star[key] = vals[key]
544 545 data['*'] = star
545 546 else:
546 547 data[k] = vals[k]
547 548 result = func(repo, proto, *[data[k] for k in keys])
548 549 else:
549 550 result = func(repo, proto)
550 551 if isinstance(result, ooberror):
551 552 return result
552 553 res.append(escapearg(result))
553 554 return ';'.join(res)
554 555
@wireprotocommand('between', 'pairs')
def between(repo, proto, pairs):
    pairs = [decodelist(p, '-') for p in pairs.split(" ")]
    r = []
    for b in repo.between(pairs):
        r.append(encodelist(b) + "\n")
    return "".join(r)

@wireprotocommand('branchmap')
def branchmap(repo, proto):
    branchmap = repo.branchmap()
    heads = []
    for branch, nodes in branchmap.iteritems():
        branchname = urllib.quote(encoding.fromlocal(branch))
        branchnodes = encodelist(nodes)
        heads.append('%s %s' % (branchname, branchnodes))
    return '\n'.join(heads)

@wireprotocommand('branches', 'nodes')
def branches(repo, proto, nodes):
    nodes = decodelist(nodes)
    r = []
    for b in repo.branches(nodes):
        r.append(encodelist(b) + "\n")
    return "".join(r)

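# Editorial example (not part of wireproto.py): node lists travel over the
# wire as separator-joined hex strings. encodelist/decodelist (defined
# earlier in this module) convert between that and lists of binary node
# ids; the node values here are made up.
def _demolists():
    binnodes = [bin('11' * 20), bin('22' * 20)]
    wire = encodelist(binnodes)            # '1111...111 2222...222'
    assert decodelist(wire) == binnodes    # round-trips
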

wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                 'known', 'getbundle', 'unbundlehash', 'batch']

def _capabilities(repo, proto):
    """return a list of capabilities for a repo

    This function exists to allow extensions to easily wrap the capabilities
    computation:

    - returns a list: easy to alter
    - changes done here are propagated to both the `capabilities` and `hello`
      commands without any other action needed.
    """
    # copy to prevent modification of the global list
    caps = list(wireprotocaps)
    if _allowstream(repo.ui):
        if repo.ui.configbool('server', 'preferuncompressed', False):
            caps.append('stream-preferred')
        requiredformats = repo.requirements & repo.supportedformats
        # if our local revlogs are just revlogv1, add 'stream' cap
        if not requiredformats - set(('revlogv1',)):
            caps.append('stream')
        # otherwise, add 'streamreqs' detailing our local revlog format
        else:
            caps.append('streamreqs=%s' % ','.join(requiredformats))
    if repo.ui.configbool('experimental', 'bundle2-exp', False):
        capsblob = bundle2.encodecaps(repo.bundle2caps)
        caps.append('bundle2-exp=' + urllib.quote(capsblob))
    caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
    caps.append('httpheader=1024')
    return caps

# If you are writing an extension and consider wrapping this function, wrap
# `_capabilities` instead.
@wireprotocommand('capabilities')
def capabilities(repo, proto):
    return ' '.join(_capabilities(repo, proto))

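# Editorial example (not part of wireproto.py): an extension advertising an
# extra capability by wrapping _capabilities, as recommended above. The
# capability name 'x-mycap' is made up; extensions.wrapfunction is the
# standard Mercurial wrapping helper.
from mercurial import extensions, wireproto

def _mycaps(orig, repo, proto):
    caps = orig(repo, proto)
    caps.append('x-mycap')
    return caps

def extsetup(ui):
    extensions.wrapfunction(wireproto, '_capabilities', _mycaps)
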
@wireprotocommand('changegroup', 'roots')
def changegroup(repo, proto, roots):
    nodes = decodelist(roots)
    cg = changegroupmod.changegroup(repo, nodes, 'serve')
    return streamres(proto.groupchunks(cg))

@wireprotocommand('changegroupsubset', 'bases heads')
def changegroupsubset(repo, proto, bases, heads):
    bases = decodelist(bases)
    heads = decodelist(heads)
    cg = changegroupmod.changegroupsubset(repo, bases, heads, 'serve')
    return streamres(proto.groupchunks(cg))

@wireprotocommand('debugwireargs', 'one two *')
def debugwireargs(repo, proto, one, two, others):
    # only accept optional args from the known set
    opts = options('debugwireargs', ['three', 'four'], others)
    return repo.debugwireargs(one, two, **opts)

# Mapping of the options accepted by getbundle to the type of their value,
# as consumed by the decoding loop in getbundle() below.
#
# Meant to be extended by extensions. It is the extension's responsibility to
# ensure such options are properly processed in exchange.getbundle.
gboptsmap = {'heads': 'nodes',
             'common': 'nodes',
             'bundlecaps': 'csv',
             'listkeys': 'csv'}

@wireprotocommand('getbundle', '*')
def getbundle(repo, proto, others):
    opts = options('getbundle', gboptsmap.keys(), others)
    for k, v in opts.iteritems():
        keytype = gboptsmap[k]
        if keytype == 'nodes':
            opts[k] = decodelist(v)
        elif keytype == 'csv':
            opts[k] = set(v.split(','))
        elif keytype != 'plain':
            raise KeyError('unknown getbundle option type %s'
                           % keytype)
    cg = exchange.getbundle(repo, 'serve', **opts)
    return streamres(proto.groupchunks(cg))

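# Editorial example (not part of wireproto.py): how gboptsmap drives the
# decoding above. Wire values arrive as strings; 'nodes' options become
# lists of binary nodes and 'csv' options become sets. Values are made up.
#
#   others = {'heads': '11' * 20, 'listkeys': 'phases,bookmarks'}
#   opts = options('getbundle', gboptsmap.keys(), others)
#   # after the decoding loop:
#   #   opts['heads']    == [bin('11' * 20)]
#   #   opts['listkeys'] == set(['phases', 'bookmarks'])
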
@wireprotocommand('heads')
def heads(repo, proto):
    h = repo.heads()
    return encodelist(h) + "\n"

@wireprotocommand('hello')
def hello(repo, proto):
    '''the hello command returns a set of lines describing various
    interesting things about the server, in an RFC822-like format.
    Currently the only one defined is "capabilities", which
    consists of a line in the form:

    capabilities: space separated list of tokens
    '''
    return "capabilities: %s\n" % (capabilities(repo, proto))

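# Editorial example (not part of wireproto.py): an illustrative 'hello'
# reply, a single line on the wire but wrapped here for readability:
#
#   capabilities: lookup changegroupsubset branchmap pushkey known getbundle
#       unbundlehash batch streamreqs=revlogv1
#       unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024
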
@wireprotocommand('listkeys', 'namespace')
def listkeys(repo, proto, namespace):
    d = repo.listkeys(encoding.tolocal(namespace)).items()
    return pushkeymod.encodekeys(d)

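# Editorial example (not part of wireproto.py): pushkey.encodekeys renders
# one tab-separated 'key<TAB>value' line per entry, so a
# listkeys('bookmarks') reply might look like this (made-up bookmark and
# node):
#
#   mybook\t1111111111111111111111111111111111111111
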
@wireprotocommand('lookup', 'key')
def lookup(repo, proto, key):
    try:
        k = encoding.tolocal(key)
        c = repo[k]
        r = c.hex()
        success = 1
    except Exception, inst:
        r = str(inst)
        success = 0
    return "%s %s\n" % (success, r)

@wireprotocommand('known', 'nodes *')
def known(repo, proto, nodes, others):
    return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))

@wireprotocommand('pushkey', 'namespace key old new')
def pushkey(repo, proto, namespace, key, old, new):
    # compatibility with pre-1.8 clients which were accidentally
    # sending raw binary nodes rather than utf-8-encoded hex
    if len(new) == 20 and new.encode('string-escape') != new:
        # looks like it could be a binary node
        try:
            new.decode('utf-8')
            new = encoding.tolocal(new) # but cleanly decodes as UTF-8
        except UnicodeDecodeError:
            pass # binary, leave unmodified
    else:
        new = encoding.tolocal(new) # normal path

    if util.safehasattr(proto, 'restore'):
        proto.redirect()

        try:
            r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
                             encoding.tolocal(old), new) or False
        except util.Abort:
            r = False

        output = proto.restore()

        return '%s\n%s' % (int(r), output)

    r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
                     encoding.tolocal(old), new)
    return '%s\n' % int(r)

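# Editorial example (not part of wireproto.py): the pre-1.8 compatibility
# heuristic above in action, with made-up values. A 20-byte value that does
# not survive string-escape round-tripping and is not valid UTF-8 is treated
# as a raw binary node and left alone; everything else goes through
# encoding.tolocal().
def _demopushkeycompat():
    rawnode = '\x00' * 20               # binary node from an old client
    assert rawnode.encode('string-escape') != rawnode
    hexnode = '11' * 20                 # normal 40-char hex string
    assert len(hexnode) != 20           # takes the normal path
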
def _allowstream(ui):
    return ui.configbool('server', 'uncompressed', True, untrusted=True)

def _walkstreamfiles(repo):
    # this is its own function so extensions can override it
    return repo.store.walk()

@wireprotocommand('stream_out')
def stream(repo, proto):
    '''If the server supports streaming clone, it advertises the "stream"
    capability with a value representing the version and flags of the repo
    it is serving. The client checks to see if it understands the format.

    The format is simple: the server writes out a line with the number
    of files, then the total number of bytes to be transferred (separated
    by a space). Then, for each file, the server first writes the filename
    and file size (separated by the null character), then the file contents.
    '''

    if not _allowstream(repo.ui):
        return '1\n'

    entries = []
    total_bytes = 0
    try:
        # get consistent snapshot of repo, lock during scan
        lock = repo.lock()
        try:
            repo.ui.debug('scanning\n')
            for name, ename, size in _walkstreamfiles(repo):
                if size:
                    entries.append((name, size))
                    total_bytes += size
        finally:
            lock.release()
    except error.LockError:
        return '2\n' # error: 2

    def streamer(repo, entries, total):
        '''stream out all metadata files in repository.'''
        yield '0\n' # success
        repo.ui.debug('%d files, %d bytes to transfer\n' %
                      (len(entries), total_bytes))
        yield '%d %d\n' % (len(entries), total_bytes)

        sopener = repo.sopener
        oldaudit = sopener.mustaudit
        debugflag = repo.ui.debugflag
        sopener.mustaudit = False

        try:
            for name, size in entries:
                if debugflag:
                    repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
                # partially encode name over the wire for backwards compat
                yield '%s\0%d\n' % (store.encodedir(name), size)
                if size <= 65536:
                    fp = sopener(name)
                    try:
                        data = fp.read(size)
                    finally:
                        fp.close()
                    yield data
                else:
                    for chunk in util.filechunkiter(sopener(name), limit=size):
                        yield chunk
        # replace with "finally:" when support for python 2.4 has been dropped
        except Exception:
            sopener.mustaudit = oldaudit
            raise
        sopener.mustaudit = oldaudit

    return streamres(streamer(repo, entries, total_bytes))

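# Editorial example (not part of wireproto.py): a minimal sketch of a client
# consuming the stream_out format described above. `fp` is assumed to be a
# file-like object reading the server's reply; error handling is elided.
def _demoreadstream(fp):
    status = fp.readline().strip()      # '0' success, '1'/'2' errors
    if status != '0':
        raise Exception('stream_out failed: %s' % status)
    filecount, bytecount = map(int, fp.readline().split(' ', 1))
    for i in xrange(filecount):
        name, size = fp.readline().rstrip('\n').split('\0', 1)
        data = fp.read(int(size))       # raw file contents follow the header
        yield name, data
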
@wireprotocommand('unbundle', 'heads')
def unbundle(repo, proto, heads):
    their_heads = decodelist(heads)

    try:
        proto.redirect()

        exchange.check_heads(repo, their_heads, 'preparing changes')

        # write bundle data to temporary file because it can be big
        fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
        fp = os.fdopen(fd, 'wb+')
        r = 0
        try:
            proto.getfile(fp)
            fp.seek(0)
            gen = exchange.readbundle(repo.ui, fp, None)
            r = exchange.unbundle(repo, gen, their_heads, 'serve',
                                  proto._client())
            if util.safehasattr(r, 'addpart'):
                # The return value looks streamable: we are in the bundle2
                # case and should return a stream.
                return streamres(r.getchunks())
            return pushres(r)
        finally:
            fp.close()
            os.unlink(tempname)
    except error.BundleValueError, exc:
        bundler = bundle2.bundle20(repo.ui)
        errpart = bundler.newpart('B2X:ERROR:UNSUPPORTEDCONTENT')
        if exc.parttype is not None:
            errpart.addparam('parttype', exc.parttype)
        if exc.params:
            errpart.addparam('params', '\0'.join(exc.params))
        return streamres(bundler.getchunks())
    except util.Abort, inst:
        # The old code we moved here used sys.stderr directly; we kept that
        # to minimise the change. This needs to be moved to something proper.
        # Feel free to do it.
        if getattr(inst, 'duringunbundle2', False):
            bundler = bundle2.bundle20(repo.ui)
            manargs = [('message', str(inst))]
            advargs = []
            if inst.hint is not None:
                advargs.append(('hint', inst.hint))
            bundler.addpart(bundle2.bundlepart('B2X:ERROR:ABORT',
                                               manargs, advargs))
            return streamres(bundler.getchunks())
        else:
            sys.stderr.write("abort: %s\n" % inst)
            return pushres(0)
    except error.PushRaced, exc:
        if getattr(exc, 'duringunbundle2', False):
            bundler = bundle2.bundle20(repo.ui)
            bundler.newpart('B2X:ERROR:PUSHRACED', [('message', str(exc))])
            return streamres(bundler.getchunks())
        else:
            return pusherr(str(exc))