getbundle: pass arbitrary arguments all along the call chain...
Pierre-Yves David
r21157:60ad2ea5 default
@@ -1,744 +1,745 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85
12 12 import discovery, phases, obsolete, bookmarks, bundle2
13 13
14 14 def readbundle(ui, fh, fname, vfs=None):
15 15 header = changegroup.readexactly(fh, 4)
16 16
17 17 alg = None
18 18 if not fname:
19 19 fname = "stream"
20 20 if not header.startswith('HG') and header.startswith('\0'):
21 21 fh = changegroup.headerlessfixup(fh, header)
22 22 header = "HG10"
23 23 alg = 'UN'
24 24 elif vfs:
25 25 fname = vfs.join(fname)
26 26
27 27 magic, version = header[0:2], header[2:4]
28 28
29 29 if magic != 'HG':
30 30 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
31 31 if version == '10':
32 32 if alg is None:
33 33 alg = changegroup.readexactly(fh, 2)
34 34 return changegroup.unbundle10(fh, alg)
35 35 elif version == '2X':
36 36 return bundle2.unbundle20(ui, fh, header=magic + version)
37 37 else:
38 38 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39 39
40 40
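
readbundle above dispatches on a 4-byte header: a 2-byte 'HG' magic followed by a 2-byte version ('10' or '2X'). A minimal standalone sketch of that sniffing logic, using plain Python rather than Mercurial's unbundler classes:

import io

def sniff_bundle_version(fh):
    # read the 4-byte header: 2-byte magic + 2-byte version,
    # mirroring the 'HG10' / 'HG2X' checks in readbundle above
    header = fh.read(4)
    magic, version = header[0:2], header[2:4]
    if magic != b'HG':
        raise ValueError('not a Mercurial bundle')
    if version not in (b'10', b'2X'):
        raise ValueError('unknown bundle version %r' % version)
    return version

fh = io.BytesIO(b'HG10' + b'UN' + b'...payload...')
assert sniff_bundle_version(fh) == b'10'
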
41 41 class pushoperation(object):
42 42 """A object that represent a single push operation
43 43
44 44 It purpose is to carry push related state and very common operation.
45 45
46 46 A new should be created at the beginning of each push and discarded
47 47 afterward.
48 48 """
49 49
50 50 def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
51 51 # repo we push from
52 52 self.repo = repo
53 53 self.ui = repo.ui
54 54 # repo we push to
55 55 self.remote = remote
56 56 # force option provided
57 57 self.force = force
58 58 # revs to be pushed (None is "all")
59 59 self.revs = revs
60 60 # allow push of new branch
61 61 self.newbranch = newbranch
62 62 # did a local lock get acquired?
63 63 self.locallocked = None
64 64 # Integer version of the push result
65 65 # - None means nothing to push
66 66 # - 0 means HTTP error
67 67 # - 1 means we pushed and remote head count is unchanged *or*
68 68 # we have outgoing changesets but refused to push
69 69 # - other values as described by addchangegroup()
70 70 self.ret = None
71 71 # discover.outgoing object (contains common and outgoing data)
72 72 self.outgoing = None
73 73 # all remote heads before the push
74 74 self.remoteheads = None
75 75 # testable as a boolean indicating if any nodes are missing locally.
76 76 self.incoming = None
77 77 # set of all heads common after changeset bundle push
78 78 self.commonheads = None
79 79
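
Callers of push() branch on the integer convention documented for pushop.ret above. A small illustrative helper paraphrasing those comments (describe_push_result is a hypothetical name, not part of Mercurial's API):

def describe_push_result(ret):
    # mirrors the convention documented on pushoperation.ret:
    #   None  -> nothing to push
    #   0     -> HTTP error
    #   1     -> pushed, remote head count unchanged (or push refused)
    #   other -> value returned by addchangegroup()
    if ret is None:
        return 'nothing to push'
    if ret == 0:
        return 'HTTP error'
    if ret == 1:
        return 'pushed (remote head count unchanged) or refused'
    return 'addchangegroup() returned %r' % ret

assert describe_push_result(None) == 'nothing to push'
assert describe_push_result(2) == 'addchangegroup() returned 2'
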
80 80 def push(repo, remote, force=False, revs=None, newbranch=False):
81 81 '''Push outgoing changesets (limited by revs) from a local
82 82 repository to remote. Return an integer:
83 83 - None means nothing to push
84 84 - 0 means HTTP error
85 85 - 1 means we pushed and remote head count is unchanged *or*
86 86 we have outgoing changesets but refused to push
87 87 - other values as described by addchangegroup()
88 88 '''
89 89 pushop = pushoperation(repo, remote, force, revs, newbranch)
90 90 if pushop.remote.local():
91 91 missing = (set(pushop.repo.requirements)
92 92 - pushop.remote.local().supported)
93 93 if missing:
94 94 msg = _("required features are not"
95 95 " supported in the destination:"
96 96 " %s") % (', '.join(sorted(missing)))
97 97 raise util.Abort(msg)
98 98
99 99 # there are two ways to push to remote repo:
100 100 #
101 101 # addchangegroup assumes local user can lock remote
102 102 # repo (local filesystem, old ssh servers).
103 103 #
104 104 # unbundle assumes local user cannot lock remote repo (new ssh
105 105 # servers, http servers).
106 106
107 107 if not pushop.remote.canpush():
108 108 raise util.Abort(_("destination does not support push"))
109 109 # get local lock as we might write phase data
110 110 locallock = None
111 111 try:
112 112 locallock = pushop.repo.lock()
113 113 pushop.locallocked = True
114 114 except IOError, err:
115 115 pushop.locallocked = False
116 116 if err.errno != errno.EACCES:
117 117 raise
118 118 # source repo cannot be locked.
119 119 # We do not abort the push, but just disable the local phase
120 120 # synchronisation.
121 121 msg = 'cannot lock source repository: %s\n' % err
122 122 pushop.ui.debug(msg)
123 123 try:
124 124 pushop.repo.checkpush(pushop)
125 125 lock = None
126 126 unbundle = pushop.remote.capable('unbundle')
127 127 if not unbundle:
128 128 lock = pushop.remote.lock()
129 129 try:
130 130 _pushdiscovery(pushop)
131 131 if _pushcheckoutgoing(pushop):
132 132 pushop.repo.prepushoutgoinghooks(pushop.repo,
133 133 pushop.remote,
134 134 pushop.outgoing)
135 135 if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
136 136 False)
137 137 and pushop.remote.capable('bundle2-exp')):
138 138 _pushbundle2(pushop)
139 139 else:
140 140 _pushchangeset(pushop)
141 141 _pushcomputecommonheads(pushop)
142 142 _pushsyncphase(pushop)
143 143 _pushobsolete(pushop)
144 144 finally:
145 145 if lock is not None:
146 146 lock.release()
147 147 finally:
148 148 if locallock is not None:
149 149 locallock.release()
150 150
151 151 _pushbookmark(pushop)
152 152 return pushop.ret
153 153
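
Note how push() degrades gracefully when the source repo cannot be locked: on EACCES it records locallocked = False and later skips only the local phase updates instead of aborting. The same pattern in isolation, as a generic sketch not tied to Mercurial's lock class:

import errno

def trylock(acquire):
    """return (lock, locked); lock is None when EACCES was hit

    Any error other than permission denied is re-raised, matching the
    locking block in push() above.
    """
    try:
        return acquire(), True
    except IOError as err:
        if err.errno != errno.EACCES:
            raise
        return None, False

def denied():
    raise IOError(errno.EACCES, 'permission denied')

lock, locked = trylock(denied)
assert lock is None and not locked
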
154 154 def _pushdiscovery(pushop):
155 155 # discovery
156 156 unfi = pushop.repo.unfiltered()
157 157 fci = discovery.findcommonincoming
158 158 commoninc = fci(unfi, pushop.remote, force=pushop.force)
159 159 common, inc, remoteheads = commoninc
160 160 fco = discovery.findcommonoutgoing
161 161 outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
162 162 commoninc=commoninc, force=pushop.force)
163 163 pushop.outgoing = outgoing
164 164 pushop.remoteheads = remoteheads
165 165 pushop.incoming = inc
166 166
167 167 def _pushcheckoutgoing(pushop):
168 168 outgoing = pushop.outgoing
169 169 unfi = pushop.repo.unfiltered()
170 170 if not outgoing.missing:
171 171 # nothing to push
172 172 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
173 173 return False
174 174 # something to push
175 175 if not pushop.force:
176 176 # if repo.obsstore is falsy there are no obsolete changesets,
177 177 # so we can save the iteration
178 178 if unfi.obsstore:
179 179 # these messages are here for 80 char limit reasons
180 180 mso = _("push includes obsolete changeset: %s!")
181 181 mst = "push includes %s changeset: %s!"
182 182 # plain versions for i18n tool to detect them
183 183 _("push includes unstable changeset: %s!")
184 184 _("push includes bumped changeset: %s!")
185 185 _("push includes divergent changeset: %s!")
186 186 # If we were to push while there is at least one
187 187 # obsolete or unstable changeset in missing, at
188 188 # least one of the missing heads would be obsolete or
189 189 # unstable. So checking heads only is ok
190 190 for node in outgoing.missingheads:
191 191 ctx = unfi[node]
192 192 if ctx.obsolete():
193 193 raise util.Abort(mso % ctx)
194 194 elif ctx.troubled():
195 195 raise util.Abort(_(mst)
196 196 % (ctx.troubles()[0],
197 197 ctx))
198 198 newbm = pushop.ui.configlist('bookmarks', 'pushing')
199 199 discovery.checkheads(unfi, pushop.remote, outgoing,
200 200 pushop.remoteheads,
201 201 pushop.newbranch,
202 202 bool(pushop.incoming),
203 203 newbm)
204 204 return True
205 205
206 206 def _pushbundle2(pushop):
207 207 """push data to the remote using bundle2
208 208
209 209 The only currently supported type of data is changegroup but this will
210 210 evolve in the future."""
211 211 # Send known heads to the server for race detection.
212 212 capsblob = urllib.unquote(pushop.remote.capable('bundle2-exp'))
213 213 caps = bundle2.decodecaps(capsblob)
214 214 bundler = bundle2.bundle20(pushop.ui, caps)
215 215 # create reply capability
216 216 capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
217 217 bundler.addpart(bundle2.bundlepart('b2x:replycaps', data=capsblob))
218 218 if not pushop.force:
219 219 part = bundle2.bundlepart('B2X:CHECK:HEADS',
220 220 data=iter(pushop.remoteheads))
221 221 bundler.addpart(part)
222 222 extrainfo = _pushbundle2extraparts(pushop, bundler)
223 223 # add the changegroup bundle
224 224 cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
225 225 cgpart = bundle2.bundlepart('B2X:CHANGEGROUP', data=cg.getchunks())
226 226 bundler.addpart(cgpart)
227 227 stream = util.chunkbuffer(bundler.getchunks())
228 228 reply = pushop.remote.unbundle(stream, ['force'], 'push')
229 229 try:
230 230 op = bundle2.processbundle(pushop.repo, reply)
231 231 except KeyError, exc:
232 232 raise util.Abort('missing support for %s' % exc)
233 233 cgreplies = op.records.getreplies(cgpart.id)
234 234 assert len(cgreplies['changegroup']) == 1
235 235 pushop.ret = cgreplies['changegroup'][0]['return']
236 236 _pushbundle2extrareply(pushop, op, extrainfo)
237 237
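
_pushbundle2 never materialises the bundle: bundler.getchunks() yields byte chunks part by part, and util.chunkbuffer wraps them as a file-like stream. A stdlib-only sketch of that lazy chunk concatenation (fakepart is a stand-in, not bundle2's real part class):

class fakepart(object):
    # stand-in for bundle2.bundlepart: knows how to emit its chunks
    def __init__(self, name, payload):
        self.name = name
        self.payload = payload

    def getchunks(self):
        yield ('[%s]' % self.name).encode('ascii')
        yield self.payload

def getchunks(parts):
    # concatenate every part's chunk stream, like bundler.getchunks()
    for part in parts:
        for chunk in part.getchunks():
            yield chunk

parts = [fakepart('b2x:replycaps', b'caps'),
         fakepart('b2x:changegroup', b'cgdata')]
assert b''.join(getchunks(parts)) == b'[b2x:replycaps]caps[b2x:changegroup]cgdata'
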
238 238 def _pushbundle2extraparts(pushop, bundler):
239 239 """hook function to let extensions add parts
240 240
241 241 Return a dict to let extensions pass data to the reply processing.
242 242 """
243 243 return {}
244 244
245 245 def _pushbundle2extrareply(pushop, op, extrainfo):
246 246 """hook function to let extensions react to part replies
247 247
248 248 The dict from _pushbundle2extrareply is fed to this function.
249 249 """
250 250 pass
251 251
252 252 def _pushchangeset(pushop):
253 253 """Make the actual push of changeset bundle to remote repo"""
254 254 outgoing = pushop.outgoing
255 255 unbundle = pushop.remote.capable('unbundle')
256 256 # TODO: get bundlecaps from remote
257 257 bundlecaps = None
258 258 # create a changegroup from local
259 259 if pushop.revs is None and not (outgoing.excluded
260 260 or pushop.repo.changelog.filteredrevs):
261 261 # push everything,
262 262 # use the fast path, no race possible on push
263 263 bundler = changegroup.bundle10(pushop.repo, bundlecaps)
264 264 cg = changegroup.getsubset(pushop.repo,
265 265 outgoing,
266 266 bundler,
267 267 'push',
268 268 fastpath=True)
269 269 else:
270 270 cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
271 271 bundlecaps)
272 272
273 273 # apply changegroup to remote
274 274 if unbundle:
275 275 # local repo finds heads on server, finds out what
276 276 # revs it must push. once revs transferred, if server
277 277 # finds it has different heads (someone else won
278 278 # commit/push race), server aborts.
279 279 if pushop.force:
280 280 remoteheads = ['force']
281 281 else:
282 282 remoteheads = pushop.remoteheads
283 283 # ssh: return remote's addchangegroup()
284 284 # http: return remote's addchangegroup() or 0 for error
285 285 pushop.ret = pushop.remote.unbundle(cg, remoteheads,
286 286 'push')
287 287 else:
288 288 # we return an integer indicating remote head count
289 289 # change
290 290 pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
291 291
292 292 def _pushcomputecommonheads(pushop):
293 293 unfi = pushop.repo.unfiltered()
294 294 if pushop.ret:
295 295 # push succeeded, synchronize the target of the push
296 296 cheads = pushop.outgoing.missingheads
297 297 elif pushop.revs is None:
298 298 # All-out push failed. synchronize all common
299 299 cheads = pushop.outgoing.commonheads
300 300 else:
301 301 # I want cheads = heads(::missingheads and ::commonheads)
302 302 # (missingheads is revs with secret changeset filtered out)
303 303 #
304 304 # This can be expressed as:
305 305 # cheads = ( (missingheads and ::commonheads)
306 306 # + (commonheads and ::missingheads))
307 307 # )
308 308 #
309 309 # while trying to push we already computed the following:
310 310 # common = (::commonheads)
311 311 # missing = ((commonheads::missingheads) - commonheads)
312 312 #
313 313 # We can pick:
314 314 # * missingheads part of common (::commonheads)
315 315 common = set(pushop.outgoing.common)
316 316 nm = pushop.repo.changelog.nodemap
317 317 cheads = [node for node in pushop.revs if nm[node] in common]
318 318 # and
319 319 # * commonheads parents on missing
320 320 revset = unfi.set('%ln and parents(roots(%ln))',
321 321 pushop.outgoing.commonheads,
322 322 pushop.outgoing.missing)
323 323 cheads.extend(c.node() for c in revset)
324 324 pushop.commonheads = cheads
325 325
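
The comment block above reduces to cheads = heads(::missingheads and ::commonheads). A toy illustration of that set algebra on a dict-based DAG (a schematic only; Mercurial computes this with revsets and the changelog nodemap):

def ancestors(parents, heads):
    # inclusive ancestor set of `heads` in a {node: [parents]} DAG
    seen, stack = set(), list(heads)
    while stack:
        n = stack.pop()
        if n not in seen:
            seen.add(n)
            stack.extend(parents.get(n, []))
    return seen

# linear history 0 <- 1 <- 2 <- 3 <- 4; the remote has up to 2,
# we are pushing head 4, so everything common is an ancestor of 2
parents = {0: [], 1: [0], 2: [1], 3: [2], 4: [3]}
commonheads, missingheads = [2], [4]
both = ancestors(parents, missingheads) & ancestors(parents, commonheads)
assert both == set([0, 1, 2])   # heads(both) would be just {2}
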
326 326 def _pushsyncphase(pushop):
327 327 """synchronise phase information locally and remotely"""
328 328 unfi = pushop.repo.unfiltered()
329 329 cheads = pushop.commonheads
330 330 if pushop.ret:
331 331 # push succeeded, synchronize the target of the push
332 332 cheads = pushop.outgoing.missingheads
333 333 elif pushop.revs is None:
334 334 # All-out push failed. synchronize all common
335 335 cheads = pushop.outgoing.commonheads
336 336 else:
337 337 # I want cheads = heads(::missingheads and ::commonheads)
338 338 # (missingheads is revs with secret changeset filtered out)
339 339 #
340 340 # This can be expressed as:
341 341 # cheads = ( (missingheads and ::commonheads)
342 342 # + (commonheads and ::missingheads))
343 343 # )
344 344 #
345 345 # while trying to push we already computed the following:
346 346 # common = (::commonheads)
347 347 # missing = ((commonheads::missingheads) - commonheads)
348 348 #
349 349 # We can pick:
350 350 # * missingheads part of common (::commonheads)
351 351 common = set(pushop.outgoing.common)
352 352 nm = pushop.repo.changelog.nodemap
353 353 cheads = [node for node in pushop.revs if nm[node] in common]
354 354 # and
355 355 # * commonheads parents on missing
356 356 revset = unfi.set('%ln and parents(roots(%ln))',
357 357 pushop.outgoing.commonheads,
358 358 pushop.outgoing.missing)
359 359 cheads.extend(c.node() for c in revset)
360 360 pushop.commonheads = cheads
361 361 # even when we don't push, exchanging phase data is useful
362 362 remotephases = pushop.remote.listkeys('phases')
363 363 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
364 364 and remotephases # server supports phases
365 365 and pushop.ret is None # nothing was pushed
366 366 and remotephases.get('publishing', False)):
367 367 # When:
368 368 # - this is a subrepo push
369 369 # - and remote support phase
370 370 # - and no changeset was pushed
371 371 # - and remote is publishing
372 372 # We may be in issue 3871 case!
373 373 # We drop the phase synchronisation that would be done by
374 374 # courtesy, in order to publish changesets that are possibly
375 375 # draft locally but already public on the remote.
376 376 remotephases = {'publishing': 'True'}
377 377 if not remotephases: # old server or public-only reply from non-publishing
378 378 _localphasemove(pushop, cheads)
379 379 # don't push any phase data as there is nothing to push
380 380 else:
381 381 ana = phases.analyzeremotephases(pushop.repo, cheads,
382 382 remotephases)
383 383 pheads, droots = ana
384 384 ### Apply remote phase on local
385 385 if remotephases.get('publishing', False):
386 386 _localphasemove(pushop, cheads)
387 387 else: # publish = False
388 388 _localphasemove(pushop, pheads)
389 389 _localphasemove(pushop, cheads, phases.draft)
390 390 ### Apply local phase on remote
391 391
392 392 # Get the list of all revs that are draft on the remote but public here.
393 393 # XXX Beware that the revset breaks if droots is not strictly
394 394 # XXX roots; we may want to ensure it is, but that is costly
395 395 outdated = unfi.set('heads((%ln::%ln) and public())',
396 396 droots, cheads)
397 397 for newremotehead in outdated:
398 398 r = pushop.remote.pushkey('phases',
399 399 newremotehead.hex(),
400 400 str(phases.draft),
401 401 str(phases.public))
402 402 if not r:
403 403 pushop.ui.warn(_('updating %s to public failed!\n')
404 404 % newremotehead)
405 405
406 406 def _localphasemove(pushop, nodes, phase=phases.public):
407 407 """move <nodes> to <phase> in the local source repo"""
408 408 if pushop.locallocked:
409 409 phases.advanceboundary(pushop.repo, phase, nodes)
410 410 else:
411 411 # repo is not locked, do not change any phases!
412 412 # Inform the user that phases should have been moved when
413 413 # applicable.
414 414 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
415 415 phasestr = phases.phasenames[phase]
416 416 if actualmoves:
417 417 pushop.ui.status(_('cannot lock source repo, skipping '
418 418 'local %s phase update\n') % phasestr)
419 419
420 420 def _pushobsolete(pushop):
421 421 """utility function to push obsolete markers to a remote"""
422 422 pushop.ui.debug('try to push obsolete markers to remote\n')
423 423 repo = pushop.repo
424 424 remote = pushop.remote
425 425 if (obsolete._enabled and repo.obsstore and
426 426 'obsolete' in remote.listkeys('namespaces')):
427 427 rslts = []
428 428 remotedata = repo.listkeys('obsolete')
429 429 for key in sorted(remotedata, reverse=True):
430 430 # reverse sort to ensure we end with dump0
431 431 data = remotedata[key]
432 432 rslts.append(remote.pushkey('obsolete', key, '', data))
433 433 if [r for r in rslts if not r]:
434 434 msg = _('failed to push some obsolete markers!\n')
435 435 repo.ui.warn(msg)
436 436
437 437 def _pushbookmark(pushop):
438 438 """Update bookmark position on remote"""
439 439 ui = pushop.ui
440 440 repo = pushop.repo.unfiltered()
441 441 remote = pushop.remote
442 442 ui.debug("checking for updated bookmarks\n")
443 443 revnums = map(repo.changelog.rev, pushop.revs or [])
444 444 ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
445 445 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
446 446 ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
447 447 srchex=hex)
448 448
449 449 for b, scid, dcid in advsrc:
450 450 if ancestors and repo[scid].rev() not in ancestors:
451 451 continue
452 452 if remote.pushkey('bookmarks', b, dcid, scid):
453 453 ui.status(_("updating bookmark %s\n") % b)
454 454 else:
455 455 ui.warn(_('updating bookmark %s failed!\n') % b)
456 456
457 457 class pulloperation(object):
458 458 """A object that represent a single pull operation
459 459
460 460 It purpose is to carry push related state and very common operation.
461 461
462 462 A new should be created at the beginning of each pull and discarded
463 463 afterward.
464 464 """
465 465
466 466 def __init__(self, repo, remote, heads=None, force=False):
467 467 # repo we pull into
468 468 self.repo = repo
469 469 # repo we pull from
470 470 self.remote = remote
471 471 # revision we try to pull (None is "all")
472 472 self.heads = heads
473 473 # do we force pull?
474 474 self.force = force
476 476 # the name of the pull transaction
476 476 self._trname = 'pull\n' + util.hidepassword(remote.url())
477 477 # hold the transaction once created
478 478 self._tr = None
479 479 # set of common changesets between local and remote before pull
480 480 self.common = None
481 481 # set of pulled heads
482 482 self.rheads = None
483 483 # list of missing changesets to fetch remotely
484 484 self.fetch = None
485 485 # result of changegroup pulling (used as return code by pull)
486 486 self.cgresult = None
487 487 # list of steps remaining to do (related to future bundle2 usage)
488 488 self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])
489 489
490 490 @util.propertycache
491 491 def pulledsubset(self):
492 492 """heads of the set of changeset target by the pull"""
493 493 # compute target subset
494 494 if self.heads is None:
495 495 # We pulled every thing possible
496 496 # sync on everything common
497 497 c = set(self.common)
498 498 ret = list(self.common)
499 499 for n in self.rheads:
500 500 if n not in c:
501 501 ret.append(n)
502 502 return ret
503 503 else:
504 504 # We pulled a specific subset
505 505 # sync on this subset
506 506 return self.heads
507 507
508 508 def gettransaction(self):
509 509 """get appropriate pull transaction, creating it if needed"""
510 510 if self._tr is None:
511 511 self._tr = self.repo.transaction(self._trname)
512 512 return self._tr
513 513
514 514 def closetransaction(self):
515 515 """close transaction if created"""
516 516 if self._tr is not None:
517 517 self._tr.close()
518 518
519 519 def releasetransaction(self):
520 520 """release transaction if created"""
521 521 if self._tr is not None:
522 522 self._tr.release()
523 523
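
The gettransaction/closetransaction/releasetransaction trio above implements a create-on-demand transaction, so no journal is opened when the pull turns out to be empty. The pattern in generic form (an illustrative sketch; Mercurial's transaction object carries much more machinery):

class LazyTransaction(object):
    """create the underlying transaction only on first use"""

    def __init__(self, factory):
        self._factory = factory
        self._tr = None

    def get(self):
        if self._tr is None:
            self._tr = self._factory()
        return self._tr

    def close(self):
        if self._tr is not None:
            self._tr.close()

    def release(self):
        if self._tr is not None:
            self._tr.release()

class _demotr(object):
    def close(self):
        pass
    def release(self):
        pass

tr = LazyTransaction(_demotr)
tr.release()                 # no-op: nothing was ever created
assert tr.get() is tr.get()  # created once, then reused
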
524 524 def pull(repo, remote, heads=None, force=False):
525 525 pullop = pulloperation(repo, remote, heads, force)
526 526 if pullop.remote.local():
527 527 missing = set(pullop.remote.requirements) - pullop.repo.supported
528 528 if missing:
529 529 msg = _("required features are not"
530 530 " supported in the destination:"
531 531 " %s") % (', '.join(sorted(missing)))
532 532 raise util.Abort(msg)
533 533
534 534 lock = pullop.repo.lock()
535 535 try:
536 536 _pulldiscovery(pullop)
537 537 if (pullop.repo.ui.configbool('server', 'bundle2', False)
538 538 and pullop.remote.capable('bundle2-exp')):
539 539 _pullbundle2(pullop)
540 540 if 'changegroup' in pullop.todosteps:
541 541 _pullchangeset(pullop)
542 542 if 'phases' in pullop.todosteps:
543 543 _pullphase(pullop)
544 544 if 'obsmarkers' in pullop.todosteps:
545 545 _pullobsolete(pullop)
546 546 pullop.closetransaction()
547 547 finally:
548 548 pullop.releasetransaction()
549 549 lock.release()
550 550
551 551 return pullop.cgresult
552 552
553 553 def _pulldiscovery(pullop):
554 554 """discovery phase for the pull
555 555
556 556 Currently handles changeset discovery only; will change to handle all
557 557 discovery at some point."""
558 558 tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
559 559 pullop.remote,
560 560 heads=pullop.heads,
561 561 force=pullop.force)
562 562 pullop.common, pullop.fetch, pullop.rheads = tmp
563 563
564 564 def _pullbundle2(pullop):
565 565 """pull data using bundle2
566 566
567 567 For now, the only supported data is the changegroup."""
568 568 kwargs = {'bundlecaps': set(['HG2X'])}
569 569 capsblob = bundle2.encodecaps(pullop.repo.bundle2caps)
570 570 kwargs['bundlecaps'].add('bundle2=' + urllib.quote(capsblob))
571 571 # pulling changegroup
572 572 pullop.todosteps.remove('changegroup')
573 573 if not pullop.fetch:
574 574 pullop.repo.ui.status(_("no changes found\n"))
575 575 pullop.cgresult = 0
576 576 else:
577 577 kwargs['common'] = pullop.common
578 578 kwargs['heads'] = pullop.heads or pullop.rheads
579 579 if pullop.heads is None and list(pullop.common) == [nullid]:
580 580 pullop.repo.ui.status(_("requesting all changes\n"))
581 581 if kwargs.keys() == ['format']:
582 582 return # nothing to pull
583 583 bundle = pullop.remote.getbundle('pull', **kwargs)
584 584 try:
585 585 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
586 586 except KeyError, exc:
587 587 raise util.Abort('missing support for %s' % exc)
588 588 assert len(op.records['changegroup']) == 1
589 589 pullop.cgresult = op.records['changegroup'][0]['return']
590 590
591 591 def _pullchangeset(pullop):
592 592 """pull changeset from unbundle into the local repo"""
593 593 # We delay opening the transaction as late as possible so we
594 594 # don't open a transaction for nothing and don't break future useful
595 595 # rollback calls
596 596 pullop.todosteps.remove('changegroup')
597 597 if not pullop.fetch:
598 598 pullop.repo.ui.status(_("no changes found\n"))
599 599 pullop.cgresult = 0
600 600 return
601 601 pullop.gettransaction()
602 602 if pullop.heads is None and list(pullop.common) == [nullid]:
603 603 pullop.repo.ui.status(_("requesting all changes\n"))
604 604 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
605 605 # issue1320, avoid a race if remote changed after discovery
606 606 pullop.heads = pullop.rheads
607 607
608 608 if pullop.remote.capable('getbundle'):
609 609 # TODO: get bundlecaps from remote
610 610 cg = pullop.remote.getbundle('pull', common=pullop.common,
611 611 heads=pullop.heads or pullop.rheads)
612 612 elif pullop.heads is None:
613 613 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
614 614 elif not pullop.remote.capable('changegroupsubset'):
615 615 raise util.Abort(_("partial pull cannot be done because "
616 616 "other repository doesn't support "
617 617 "changegroupsubset."))
618 618 else:
619 619 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
620 620 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
621 621 pullop.remote.url())
622 622
623 623 def _pullphase(pullop):
624 624 # Get remote phases data from remote
625 625 pullop.todosteps.remove('phases')
626 626 remotephases = pullop.remote.listkeys('phases')
627 627 publishing = bool(remotephases.get('publishing', False))
628 628 if remotephases and not publishing:
629 629 # remote is new and non-publishing
630 630 pheads, _dr = phases.analyzeremotephases(pullop.repo,
631 631 pullop.pulledsubset,
632 632 remotephases)
633 633 phases.advanceboundary(pullop.repo, phases.public, pheads)
634 634 phases.advanceboundary(pullop.repo, phases.draft,
635 635 pullop.pulledsubset)
636 636 else:
637 637 # Remote is old or publishing; all common changesets
638 638 # should be seen as public
639 639 phases.advanceboundary(pullop.repo, phases.public,
640 640 pullop.pulledsubset)
641 641
642 642 def _pullobsolete(pullop):
643 643 """utility function to pull obsolete markers from a remote
644 644
645 645 The `gettransaction` is a function that returns the pull transaction, creating
646 646 one if necessary. We return the transaction to inform the calling code that
647 647 a new transaction has been created (when applicable).
648 648
649 649 Exists mostly to allow overriding for experimentation purposes"""
650 650 pullop.todosteps.remove('obsmarkers')
651 651 tr = None
652 652 if obsolete._enabled:
653 653 pullop.repo.ui.debug('fetching remote obsolete markers\n')
654 654 remoteobs = pullop.remote.listkeys('obsolete')
655 655 if 'dump0' in remoteobs:
656 656 tr = pullop.gettransaction()
657 657 for key in sorted(remoteobs, reverse=True):
658 658 if key.startswith('dump'):
659 659 data = base85.b85decode(remoteobs[key])
660 660 pullop.repo.obsstore.mergemarkers(tr, data)
661 661 pullop.repo.invalidatevolatilesets()
662 662 return tr
663 663
664 def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
664 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
665 **kwargs):
665 666 """return a full bundle (with potentially multiple kind of parts)
666 667
667 668 Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
668 669 passed. For now, the bundle can contain only changegroup, but this will
669 670 changes when more part type will be available for bundle2.
670 671
671 672 This is different from changegroup.getbundle that only returns an HG10
672 673 changegroup bundle. They may eventually get reunited in the future when we
673 674 have a clearer idea of the API we what to query different data.
674 675
675 676 The implementation is at a very early stage and will get massive rework
676 677 when the API of bundle is refined.
677 678 """
678 679 # build bundle here.
679 680 cg = changegroup.getbundle(repo, source, heads=heads,
680 681 common=common, bundlecaps=bundlecaps)
681 682 if bundlecaps is None or 'HG2X' not in bundlecaps:
682 683 return cg
683 684 # very crude first implementation,
684 685 # the bundle API will change and the generation will be done lazily.
685 686 b2caps = {}
686 687 for bcaps in bundlecaps:
687 688 if bcaps.startswith('bundle2='):
688 689 blob = urllib.unquote(bcaps[len('bundle2='):])
689 690 b2caps.update(bundle2.decodecaps(blob))
690 691 bundler = bundle2.bundle20(repo.ui, b2caps)
691 692 part = bundle2.bundlepart('b2x:changegroup', data=cg.getchunks())
692 693 bundler.addpart(part)
693 694 return util.chunkbuffer(bundler.getchunks())
694 695
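
getbundle above recognises bundlecaps entries of the form 'bundle2=<url-quoted blob>' and unquotes them before handing the blob to bundle2.decodecaps. A round-trip sketch of just the quoting convention, using Python 2's urllib as the surrounding code does (the blob content here is an arbitrary example, not the real encodecaps format):

import urllib  # Python 2, as imported at the top of exchange.py

def packcaps(blob):
    # advertise a capability blob the way _restrictcapabilities and
    # _pullbundle2 do: URL-quote it and prefix it with 'bundle2='
    return 'bundle2=' + urllib.quote(blob)

def unpackcaps(bundlecaps):
    # mirror of the parsing loop in getbundle above
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            return urllib.unquote(bcaps[len('bundle2='):])
    return None

cap = packcaps('HG2X\nexample caps')
assert unpackcaps(['HG2X', cap]) == 'HG2X\nexample caps'
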
695 696 class PushRaced(RuntimeError):
696 697 """An exception raised during unbundling that indicate a push race"""
697 698
698 699 def check_heads(repo, their_heads, context):
699 700 """check if the heads of a repo have been modified
700 701
701 702 Used by peer for unbundling.
702 703 """
703 704 heads = repo.heads()
704 705 heads_hash = util.sha1(''.join(sorted(heads))).digest()
705 706 if not (their_heads == ['force'] or their_heads == heads or
706 707 their_heads == ['hashed', heads_hash]):
707 708 # someone else committed/pushed/unbundled while we
708 709 # were transferring data
709 710 raise PushRaced('repository changed while %s - '
710 711 'please try again' % context)
711 712
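
check_heads accepts the client's view of the remote heads either verbatim, as the literal ['force'] marker, or in 'hashed' form: a single sha1 over the sorted binary head nodes. A condensed sketch with hashlib (util.sha1 is essentially hashlib.sha1):

import hashlib

def headshash(heads):
    # the digest check_heads compares against: sha1 over the sorted,
    # concatenated binary head nodes
    return hashlib.sha1(b''.join(sorted(heads))).digest()

def raced(their_heads, current_heads):
    if their_heads == ['force']:
        return False                       # race check bypassed
    if their_heads == current_heads:
        return False                       # exact match
    if their_heads == ['hashed', headshash(current_heads)]:
        return False                       # hashed form matches
    return True  # someone committed/pushed while we were transferring

heads = [b'\x01' * 20, b'\x02' * 20]
assert not raced(['hashed', headshash(heads)], heads)
assert raced([b'\x03' * 20], heads)
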
712 713 def unbundle(repo, cg, heads, source, url):
713 714 """Apply a bundle to a repo.
714 715
715 716 This function makes sure the repo is locked during the application and has
716 717 a mechanism to check that no push race occurred between the creation of the
717 718 bundle and its application.
718 719
719 720 If the push was raced, a PushRaced exception is raised."""
720 721 r = 0
721 722 # need a transaction when processing a bundle2 stream
722 723 tr = None
723 724 lock = repo.lock()
724 725 try:
725 726 check_heads(repo, heads, 'uploading changes')
726 727 # push can proceed
727 728 if util.safehasattr(cg, 'params'):
728 729 tr = repo.transaction('unbundle')
729 730 tr.hookargs['bundle2-exp'] = '1'
730 731 r = bundle2.processbundle(repo, cg, lambda: tr).reply
731 732 cl = repo.unfiltered().changelog
732 733 p = cl.writepending() and repo.root or ""
733 734 repo.hook('b2x-pretransactionclose', throw=True, source=source,
734 735 url=url, pending=p, **tr.hookargs)
735 736 tr.close()
736 737 repo.hook('b2x-transactionclose', source=source, url=url,
737 738 **tr.hookargs)
738 739 else:
739 740 r = changegroup.addchangegroup(repo, cg, source, url)
740 741 finally:
741 742 if tr is not None:
742 743 tr.release()
743 744 lock.release()
744 745 return r
@@ -1,1910 +1,1910 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 propertycache = util.propertycache
22 22 filecache = scmutil.filecache
23 23
24 24 class repofilecache(filecache):
25 25 """All filecache usage on repo are done for logic that should be unfiltered
26 26 """
27 27
28 28 def __get__(self, repo, type=None):
29 29 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 30 def __set__(self, repo, value):
31 31 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 32 def __delete__(self, repo):
33 33 return super(repofilecache, self).__delete__(repo.unfiltered())
34 34
35 35 class storecache(repofilecache):
36 36 """filecache for files in the store"""
37 37 def join(self, obj, fname):
38 38 return obj.sjoin(fname)
39 39
40 40 class unfilteredpropertycache(propertycache):
41 41 """propertycache that apply to unfiltered repo only"""
42 42
43 43 def __get__(self, repo, type=None):
44 44 unfi = repo.unfiltered()
45 45 if unfi is repo:
46 46 return super(unfilteredpropertycache, self).__get__(unfi)
47 47 return getattr(unfi, self.name)
48 48
49 49 class filteredpropertycache(propertycache):
50 50 """propertycache that must take filtering in account"""
51 51
52 52 def cachevalue(self, obj, value):
53 53 object.__setattr__(obj, self.name, value)
54 54
55 55
56 56 def hasunfilteredcache(repo, name):
57 57 """check if a repo has an unfilteredpropertycache value for <name>"""
58 58 return name in vars(repo.unfiltered())
59 59
60 60 def unfilteredmethod(orig):
61 61 """decorate method that always need to be run on unfiltered version"""
62 62 def wrapper(repo, *args, **kwargs):
63 63 return orig(repo.unfiltered(), *args, **kwargs)
64 64 return wrapper
65 65
66 66 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 67 'unbundle'))
68 68 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 69
70 70 class localpeer(peer.peerrepository):
71 71 '''peer for a local repo; reflects only the most recent API'''
72 72
73 73 def __init__(self, repo, caps=moderncaps):
74 74 peer.peerrepository.__init__(self)
75 75 self._repo = repo.filtered('served')
76 76 self.ui = repo.ui
77 77 self._caps = repo._restrictcapabilities(caps)
78 78 self.requirements = repo.requirements
79 79 self.supportedformats = repo.supportedformats
80 80
81 81 def close(self):
82 82 self._repo.close()
83 83
84 84 def _capabilities(self):
85 85 return self._caps
86 86
87 87 def local(self):
88 88 return self._repo
89 89
90 90 def canpush(self):
91 91 return True
92 92
93 93 def url(self):
94 94 return self._repo.url()
95 95
96 96 def lookup(self, key):
97 97 return self._repo.lookup(key)
98 98
99 99 def branchmap(self):
100 100 return self._repo.branchmap()
101 101
102 102 def heads(self):
103 103 return self._repo.heads()
104 104
105 105 def known(self, nodes):
106 106 return self._repo.known(nodes)
107 107
108 108 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 format='HG10'):
109 format='HG10', **kwargs):
110 110 cg = exchange.getbundle(self._repo, source, heads=heads,
111 common=common, bundlecaps=bundlecaps)
111 common=common, bundlecaps=bundlecaps, **kwargs)
112 112 if bundlecaps is not None and 'HG2X' in bundlecaps:
113 113 # When requesting a bundle2, getbundle returns a stream to make the
114 114 # wire level function happier. We need to build a proper object
115 115 # from it in local peer.
116 116 cg = bundle2.unbundle20(self.ui, cg)
117 117 return cg
118 118
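
This hunk is the heart of the changeset: localpeer.getbundle now takes **kwargs and forwards them untouched to exchange.getbundle, which grew a matching **kwargs parameter, so future bundle2 arguments can travel the whole call chain without every intermediate layer learning about them. The mechanism reduced to a skeleton (the function names and the 'listkeys' argument are hypothetical stand-ins):

def exchange_getbundle(repo, source, heads=None, common=None,
                       bundlecaps=None, **kwargs):
    # the innermost layer receives whatever extra arguments were passed
    return kwargs

def peer_getbundle(repo, source, **kwargs):
    # intermediate layers forward **kwargs without inspecting it
    return exchange_getbundle(repo, source, **kwargs)

extra = peer_getbundle(None, 'pull', heads=['tip'], listkeys=['phases'])
# 'listkeys' survived the whole chain without peer_getbundle knowing it
assert extra == {'listkeys': ['phases']}
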
119 119 # TODO We might want to move the next two calls into legacypeer and add
120 120 # unbundle instead.
121 121
122 122 def unbundle(self, cg, heads, url):
123 123 """apply a bundle on a repo
124 124
125 125 This function handles the repo locking itself."""
126 126 try:
127 127 cg = exchange.readbundle(self.ui, cg, None)
128 128 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
129 129 if util.safehasattr(ret, 'getchunks'):
130 130 # This is a bundle20 object, turn it into an unbundler.
131 131 # This little dance should be dropped eventually when the API
132 132 # is finally improved.
133 133 stream = util.chunkbuffer(ret.getchunks())
134 134 ret = bundle2.unbundle20(self.ui, stream)
135 135 return ret
136 136 except exchange.PushRaced, exc:
137 137 raise error.ResponseError(_('push failed:'), exc.message)
138 138
139 139 def lock(self):
140 140 return self._repo.lock()
141 141
142 142 def addchangegroup(self, cg, source, url):
143 143 return changegroup.addchangegroup(self._repo, cg, source, url)
144 144
145 145 def pushkey(self, namespace, key, old, new):
146 146 return self._repo.pushkey(namespace, key, old, new)
147 147
148 148 def listkeys(self, namespace):
149 149 return self._repo.listkeys(namespace)
150 150
151 151 def debugwireargs(self, one, two, three=None, four=None, five=None):
152 152 '''used to test argument passing over the wire'''
153 153 return "%s %s %s %s %s" % (one, two, three, four, five)
154 154
155 155 class locallegacypeer(localpeer):
156 156 '''peer extension which implements legacy methods too; used for tests with
157 157 restricted capabilities'''
158 158
159 159 def __init__(self, repo):
160 160 localpeer.__init__(self, repo, caps=legacycaps)
161 161
162 162 def branches(self, nodes):
163 163 return self._repo.branches(nodes)
164 164
165 165 def between(self, pairs):
166 166 return self._repo.between(pairs)
167 167
168 168 def changegroup(self, basenodes, source):
169 169 return changegroup.changegroup(self._repo, basenodes, source)
170 170
171 171 def changegroupsubset(self, bases, heads, source):
172 172 return changegroup.changegroupsubset(self._repo, bases, heads, source)
173 173
174 174 class localrepository(object):
175 175
176 176 supportedformats = set(('revlogv1', 'generaldelta'))
177 177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
178 178 'dotencode'))
179 179 openerreqs = set(('revlogv1', 'generaldelta'))
180 180 requirements = ['revlogv1']
181 181 filtername = None
182 182
183 183 bundle2caps = {'HG2X': ()}
184 184
185 185 # a list of (ui, featureset) functions.
186 186 # only functions defined in module of enabled extensions are invoked
187 187 featuresetupfuncs = set()
188 188
189 189 def _baserequirements(self, create):
190 190 return self.requirements[:]
191 191
192 192 def __init__(self, baseui, path=None, create=False):
193 193 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
194 194 self.wopener = self.wvfs
195 195 self.root = self.wvfs.base
196 196 self.path = self.wvfs.join(".hg")
197 197 self.origroot = path
198 198 self.auditor = pathutil.pathauditor(self.root, self._checknested)
199 199 self.vfs = scmutil.vfs(self.path)
200 200 self.opener = self.vfs
201 201 self.baseui = baseui
202 202 self.ui = baseui.copy()
203 203 self.ui.copy = baseui.copy # prevent copying repo configuration
204 204 # A list of callbacks to shape the phases if no data were found.
205 205 # Callbacks are in the form: func(repo, roots) --> processed root.
206 206 # This list is to be filled by extensions during repo setup
207 207 self._phasedefaults = []
208 208 try:
209 209 self.ui.readconfig(self.join("hgrc"), self.root)
210 210 extensions.loadall(self.ui)
211 211 except IOError:
212 212 pass
213 213
214 214 if self.featuresetupfuncs:
215 215 self.supported = set(self._basesupported) # use private copy
216 216 extmods = set(m.__name__ for n, m
217 217 in extensions.extensions(self.ui))
218 218 for setupfunc in self.featuresetupfuncs:
219 219 if setupfunc.__module__ in extmods:
220 220 setupfunc(self.ui, self.supported)
221 221 else:
222 222 self.supported = self._basesupported
223 223
224 224 if not self.vfs.isdir():
225 225 if create:
226 226 if not self.wvfs.exists():
227 227 self.wvfs.makedirs()
228 228 self.vfs.makedir(notindexed=True)
229 229 requirements = self._baserequirements(create)
230 230 if self.ui.configbool('format', 'usestore', True):
231 231 self.vfs.mkdir("store")
232 232 requirements.append("store")
233 233 if self.ui.configbool('format', 'usefncache', True):
234 234 requirements.append("fncache")
235 235 if self.ui.configbool('format', 'dotencode', True):
236 236 requirements.append('dotencode')
237 237 # create an invalid changelog
238 238 self.vfs.append(
239 239 "00changelog.i",
240 240 '\0\0\0\2' # represents revlogv2
241 241 ' dummy changelog to prevent using the old repo layout'
242 242 )
243 243 if self.ui.configbool('format', 'generaldelta', False):
244 244 requirements.append("generaldelta")
245 245 requirements = set(requirements)
246 246 else:
247 247 raise error.RepoError(_("repository %s not found") % path)
248 248 elif create:
249 249 raise error.RepoError(_("repository %s already exists") % path)
250 250 else:
251 251 try:
252 252 requirements = scmutil.readrequires(self.vfs, self.supported)
253 253 except IOError, inst:
254 254 if inst.errno != errno.ENOENT:
255 255 raise
256 256 requirements = set()
257 257
258 258 self.sharedpath = self.path
259 259 try:
260 260 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
261 261 realpath=True)
262 262 s = vfs.base
263 263 if not vfs.exists():
264 264 raise error.RepoError(
265 265 _('.hg/sharedpath points to nonexistent directory %s') % s)
266 266 self.sharedpath = s
267 267 except IOError, inst:
268 268 if inst.errno != errno.ENOENT:
269 269 raise
270 270
271 271 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
272 272 self.spath = self.store.path
273 273 self.svfs = self.store.vfs
274 274 self.sopener = self.svfs
275 275 self.sjoin = self.store.join
276 276 self.vfs.createmode = self.store.createmode
277 277 self._applyrequirements(requirements)
278 278 if create:
279 279 self._writerequirements()
280 280
281 281
282 282 self._branchcaches = {}
283 283 self.filterpats = {}
284 284 self._datafilters = {}
285 285 self._transref = self._lockref = self._wlockref = None
286 286
287 287 # A cache for various files under .hg/ that tracks file changes,
288 288 # (used by the filecache decorator)
289 289 #
290 290 # Maps a property name to its util.filecacheentry
291 291 self._filecache = {}
292 292
293 293 # holds sets of revisions to be filtered
294 294 # should be cleared when something might have changed the filter value:
295 295 # - new changesets,
296 296 # - phase change,
297 297 # - new obsolescence marker,
298 298 # - working directory parent change,
299 299 # - bookmark changes
300 300 self.filteredrevcache = {}
301 301
302 302 def close(self):
303 303 pass
304 304
305 305 def _restrictcapabilities(self, caps):
306 306 # bundle2 is not ready for prime time, drop it unless explicitly
307 307 # required by the tests (or some brave tester)
308 308 if self.ui.configbool('experimental', 'bundle2-exp', False):
309 309 caps = set(caps)
310 310 capsblob = bundle2.encodecaps(self.bundle2caps)
311 311 caps.add('bundle2-exp=' + urllib.quote(capsblob))
312 312 return caps
313 313
314 314 def _applyrequirements(self, requirements):
315 315 self.requirements = requirements
316 316 self.sopener.options = dict((r, 1) for r in requirements
317 317 if r in self.openerreqs)
318 318 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
319 319 if chunkcachesize is not None:
320 320 self.sopener.options['chunkcachesize'] = chunkcachesize
321 321
322 322 def _writerequirements(self):
323 323 reqfile = self.opener("requires", "w")
324 324 for r in sorted(self.requirements):
325 325 reqfile.write("%s\n" % r)
326 326 reqfile.close()
327 327
328 328 def _checknested(self, path):
329 329 """Determine if path is a legal nested repository."""
330 330 if not path.startswith(self.root):
331 331 return False
332 332 subpath = path[len(self.root) + 1:]
333 333 normsubpath = util.pconvert(subpath)
334 334
335 335 # XXX: Checking against the current working copy is wrong in
336 336 # the sense that it can reject things like
337 337 #
338 338 # $ hg cat -r 10 sub/x.txt
339 339 #
340 340 # if sub/ is no longer a subrepository in the working copy
341 341 # parent revision.
342 342 #
343 343 # However, it can of course also allow things that would have
344 344 # been rejected before, such as the above cat command if sub/
345 345 # is a subrepository now, but was a normal directory before.
346 346 # The old path auditor would have rejected by mistake since it
347 347 # panics when it sees sub/.hg/.
348 348 #
349 349 # All in all, checking against the working copy seems sensible
350 350 # since we want to prevent access to nested repositories on
351 351 # the filesystem *now*.
352 352 ctx = self[None]
353 353 parts = util.splitpath(subpath)
354 354 while parts:
355 355 prefix = '/'.join(parts)
356 356 if prefix in ctx.substate:
357 357 if prefix == normsubpath:
358 358 return True
359 359 else:
360 360 sub = ctx.sub(prefix)
361 361 return sub.checknested(subpath[len(prefix) + 1:])
362 362 else:
363 363 parts.pop()
364 364 return False
365 365
366 366 def peer(self):
367 367 return localpeer(self) # not cached to avoid reference cycle
368 368
369 369 def unfiltered(self):
370 370 """Return unfiltered version of the repository
371 371
372 372 Intended to be overwritten by filtered repo."""
373 373 return self
374 374
375 375 def filtered(self, name):
376 376 """Return a filtered version of a repository"""
377 377 # build a new class with the mixin and the current class
378 378 # (possibly subclass of the repo)
379 379 class proxycls(repoview.repoview, self.unfiltered().__class__):
380 380 pass
381 381 return proxycls(self, name)
382 382
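
filtered() builds a throwaway class on the fly, mixing repoview.repoview into the repo's own (possibly extension-subclassed) class so that filtered views inherit any behaviour extensions patched in. The same dynamic-mixin trick in miniature (a generic sketch, not Mercurial's classes):

class view(object):
    """stand-in for repoview.repoview"""
    def __init__(self, base, name):
        self.base, self.name = base, name

class repo(object):
    def filtered(self, name):
        # mix the view class with self's *actual* class, which may be
        # a subclass installed by an extension
        class proxycls(view, self.__class__):
            pass
        return proxycls(self, name)

class extendedrepo(repo):
    extra = True

v = extendedrepo().filtered('visible')
assert isinstance(v, view) and isinstance(v, extendedrepo)
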
383 383 @repofilecache('bookmarks')
384 384 def _bookmarks(self):
385 385 return bookmarks.bmstore(self)
386 386
387 387 @repofilecache('bookmarks.current')
388 388 def _bookmarkcurrent(self):
389 389 return bookmarks.readcurrent(self)
390 390
391 391 def bookmarkheads(self, bookmark):
392 392 name = bookmark.split('@', 1)[0]
393 393 heads = []
394 394 for mark, n in self._bookmarks.iteritems():
395 395 if mark.split('@', 1)[0] == name:
396 396 heads.append(n)
397 397 return heads
398 398
399 399 @storecache('phaseroots')
400 400 def _phasecache(self):
401 401 return phases.phasecache(self, self._phasedefaults)
402 402
403 403 @storecache('obsstore')
404 404 def obsstore(self):
405 405 store = obsolete.obsstore(self.sopener)
406 406 if store and not obsolete._enabled:
407 407 # message is rare enough to not be translated
408 408 msg = 'obsolete feature not enabled but %i markers found!\n'
409 409 self.ui.warn(msg % len(list(store)))
410 410 return store
411 411
412 412 @storecache('00changelog.i')
413 413 def changelog(self):
414 414 c = changelog.changelog(self.sopener)
415 415 if 'HG_PENDING' in os.environ:
416 416 p = os.environ['HG_PENDING']
417 417 if p.startswith(self.root):
418 418 c.readpending('00changelog.i.a')
419 419 return c
420 420
421 421 @storecache('00manifest.i')
422 422 def manifest(self):
423 423 return manifest.manifest(self.sopener)
424 424
425 425 @repofilecache('dirstate')
426 426 def dirstate(self):
427 427 warned = [0]
428 428 def validate(node):
429 429 try:
430 430 self.changelog.rev(node)
431 431 return node
432 432 except error.LookupError:
433 433 if not warned[0]:
434 434 warned[0] = True
435 435 self.ui.warn(_("warning: ignoring unknown"
436 436 " working parent %s!\n") % short(node))
437 437 return nullid
438 438
439 439 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
440 440
441 441 def __getitem__(self, changeid):
442 442 if changeid is None:
443 443 return context.workingctx(self)
444 444 return context.changectx(self, changeid)
445 445
446 446 def __contains__(self, changeid):
447 447 try:
448 448 return bool(self.lookup(changeid))
449 449 except error.RepoLookupError:
450 450 return False
451 451
452 452 def __nonzero__(self):
453 453 return True
454 454
455 455 def __len__(self):
456 456 return len(self.changelog)
457 457
458 458 def __iter__(self):
459 459 return iter(self.changelog)
460 460
461 461 def revs(self, expr, *args):
462 462 '''Return a list of revisions matching the given revset'''
463 463 expr = revset.formatspec(expr, *args)
464 464 m = revset.match(None, expr)
465 465 return m(self, revset.spanset(self))
466 466
467 467 def set(self, expr, *args):
468 468 '''
469 469 Yield a context for each matching revision, after doing arg
470 470 replacement via revset.formatspec
471 471 '''
472 472 for r in self.revs(expr, *args):
473 473 yield self[r]
474 474
475 475 def url(self):
476 476 return 'file:' + self.root
477 477
478 478 def hook(self, name, throw=False, **args):
479 479 return hook.hook(self.ui, self, name, throw, **args)
480 480
481 481 @unfilteredmethod
482 482 def _tag(self, names, node, message, local, user, date, extra={}):
483 483 if isinstance(names, str):
484 484 names = (names,)
485 485
486 486 branches = self.branchmap()
487 487 for name in names:
488 488 self.hook('pretag', throw=True, node=hex(node), tag=name,
489 489 local=local)
490 490 if name in branches:
491 491 self.ui.warn(_("warning: tag %s conflicts with existing"
492 492 " branch name\n") % name)
493 493
494 494 def writetags(fp, names, munge, prevtags):
495 495 fp.seek(0, 2)
496 496 if prevtags and prevtags[-1] != '\n':
497 497 fp.write('\n')
498 498 for name in names:
499 499 m = munge and munge(name) or name
500 500 if (self._tagscache.tagtypes and
501 501 name in self._tagscache.tagtypes):
502 502 old = self.tags().get(name, nullid)
503 503 fp.write('%s %s\n' % (hex(old), m))
504 504 fp.write('%s %s\n' % (hex(node), m))
505 505 fp.close()
506 506
507 507 prevtags = ''
508 508 if local:
509 509 try:
510 510 fp = self.opener('localtags', 'r+')
511 511 except IOError:
512 512 fp = self.opener('localtags', 'a')
513 513 else:
514 514 prevtags = fp.read()
515 515
516 516 # local tags are stored in the current charset
517 517 writetags(fp, names, None, prevtags)
518 518 for name in names:
519 519 self.hook('tag', node=hex(node), tag=name, local=local)
520 520 return
521 521
522 522 try:
523 523 fp = self.wfile('.hgtags', 'rb+')
524 524 except IOError, e:
525 525 if e.errno != errno.ENOENT:
526 526 raise
527 527 fp = self.wfile('.hgtags', 'ab')
528 528 else:
529 529 prevtags = fp.read()
530 530
531 531 # committed tags are stored in UTF-8
532 532 writetags(fp, names, encoding.fromlocal, prevtags)
533 533
534 534 fp.close()
535 535
536 536 self.invalidatecaches()
537 537
538 538 if '.hgtags' not in self.dirstate:
539 539 self[None].add(['.hgtags'])
540 540
541 541 m = matchmod.exact(self.root, '', ['.hgtags'])
542 542 tagnode = self.commit(message, user, date, extra=extra, match=m)
543 543
544 544 for name in names:
545 545 self.hook('tag', node=hex(node), tag=name, local=local)
546 546
547 547 return tagnode
548 548
549 549 def tag(self, names, node, message, local, user, date):
550 550 '''tag a revision with one or more symbolic names.
551 551
552 552 names is a list of strings or, when adding a single tag, names may be a
553 553 string.
554 554
555 555 if local is True, the tags are stored in a per-repository file.
556 556 otherwise, they are stored in the .hgtags file, and a new
557 557 changeset is committed with the change.
558 558
559 559 keyword arguments:
560 560
561 561 local: whether to store tags in non-version-controlled file
562 562 (default False)
563 563
564 564 message: commit message to use if committing
565 565
566 566 user: name of user to use if committing
567 567
568 568 date: date tuple to use if committing'''
569 569
570 570 if not local:
571 571 for x in self.status()[:5]:
572 572 if '.hgtags' in x:
573 573 raise util.Abort(_('working copy of .hgtags is changed '
574 574 '(please commit .hgtags manually)'))
575 575
576 576 self.tags() # instantiate the cache
577 577 self._tag(names, node, message, local, user, date)
578 578
579 579 @filteredpropertycache
580 580 def _tagscache(self):
581 581 '''Returns a tagscache object that contains various tags related
582 582 caches.'''
583 583
584 584 # This simplifies its cache management by having one decorated
585 585 # function (this one) and the rest simply fetch things from it.
586 586 class tagscache(object):
587 587 def __init__(self):
588 588 # These two define the set of tags for this repository. tags
589 589 # maps tag name to node; tagtypes maps tag name to 'global' or
590 590 # 'local'. (Global tags are defined by .hgtags across all
591 591 # heads, and local tags are defined in .hg/localtags.)
592 592 # They constitute the in-memory cache of tags.
593 593 self.tags = self.tagtypes = None
594 594
595 595 self.nodetagscache = self.tagslist = None
596 596
597 597 cache = tagscache()
598 598 cache.tags, cache.tagtypes = self._findtags()
599 599
600 600 return cache
601 601
602 602 def tags(self):
603 603 '''return a mapping of tag to node'''
604 604 t = {}
605 605 if self.changelog.filteredrevs:
606 606 tags, tt = self._findtags()
607 607 else:
608 608 tags = self._tagscache.tags
609 609 for k, v in tags.iteritems():
610 610 try:
611 611 # ignore tags to unknown nodes
612 612 self.changelog.rev(v)
613 613 t[k] = v
614 614 except (error.LookupError, ValueError):
615 615 pass
616 616 return t
617 617
618 618 def _findtags(self):
619 619 '''Do the hard work of finding tags. Return a pair of dicts
620 620 (tags, tagtypes) where tags maps tag name to node, and tagtypes
621 621 maps tag name to a string like \'global\' or \'local\'.
622 622 Subclasses or extensions are free to add their own tags, but
623 623 should be aware that the returned dicts will be retained for the
624 624 duration of the localrepo object.'''
625 625
626 626 # XXX what tagtype should subclasses/extensions use? Currently
627 627 # mq and bookmarks add tags, but do not set the tagtype at all.
628 628 # Should each extension invent its own tag type? Should there
629 629 # be one tagtype for all such "virtual" tags? Or is the status
630 630 # quo fine?
631 631
632 632 alltags = {} # map tag name to (node, hist)
633 633 tagtypes = {}
634 634
635 635 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
636 636 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
637 637
638 638 # Build the return dicts. Have to re-encode tag names because
639 639 # the tags module always uses UTF-8 (in order not to lose info
640 640 # writing to the cache), but the rest of Mercurial wants them in
641 641 # local encoding.
642 642 tags = {}
643 643 for (name, (node, hist)) in alltags.iteritems():
644 644 if node != nullid:
645 645 tags[encoding.tolocal(name)] = node
646 646 tags['tip'] = self.changelog.tip()
647 647 tagtypes = dict([(encoding.tolocal(name), value)
648 648 for (name, value) in tagtypes.iteritems()])
649 649 return (tags, tagtypes)
650 650
651 651 def tagtype(self, tagname):
652 652 '''
653 653 return the type of the given tag. result can be:
654 654
655 655 'local' : a local tag
656 656 'global' : a global tag
657 657 None : tag does not exist
658 658 '''
659 659
660 660 return self._tagscache.tagtypes.get(tagname)
661 661
662 662 def tagslist(self):
663 663 '''return a list of tags ordered by revision'''
664 664 if not self._tagscache.tagslist:
665 665 l = []
666 666 for t, n in self.tags().iteritems():
667 667 r = self.changelog.rev(n)
668 668 l.append((r, t, n))
669 669 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
670 670
671 671 return self._tagscache.tagslist
672 672
673 673 def nodetags(self, node):
674 674 '''return the tags associated with a node'''
675 675 if not self._tagscache.nodetagscache:
676 676 nodetagscache = {}
677 677 for t, n in self._tagscache.tags.iteritems():
678 678 nodetagscache.setdefault(n, []).append(t)
679 679 for tags in nodetagscache.itervalues():
680 680 tags.sort()
681 681 self._tagscache.nodetagscache = nodetagscache
682 682 return self._tagscache.nodetagscache.get(node, [])
683 683
684 684 def nodebookmarks(self, node):
685 685 marks = []
686 686 for bookmark, n in self._bookmarks.iteritems():
687 687 if n == node:
688 688 marks.append(bookmark)
689 689 return sorted(marks)
690 690
691 691 def branchmap(self):
692 692 '''returns a dictionary {branch: [branchheads]} with branchheads
693 693 ordered by increasing revision number'''
694 694 branchmap.updatecache(self)
695 695 return self._branchcaches[self.filtername]
696 696
697 697 def branchtip(self, branch):
698 698 '''return the tip node for a given branch'''
699 699 try:
700 700 return self.branchmap().branchtip(branch)
701 701 except KeyError:
702 702 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
703 703
704 704 def lookup(self, key):
705 705 return self[key].node()
706 706
707 707 def lookupbranch(self, key, remote=None):
708 708 repo = remote or self
709 709 if key in repo.branchmap():
710 710 return key
711 711
712 712 repo = (remote and remote.local()) and remote or self
713 713 return repo[key].branch()
714 714
715 715 def known(self, nodes):
716 716 nm = self.changelog.nodemap
717 717 pc = self._phasecache
718 718 result = []
719 719 for n in nodes:
720 720 r = nm.get(n)
721 721 resp = not (r is None or pc.phase(self, r) >= phases.secret)
722 722 result.append(resp)
723 723 return result
724 724
725 725 def local(self):
726 726 return self
727 727
728 728 def cancopy(self):
729 729 # so statichttprepo's override of local() works
730 730 if not self.local():
731 731 return False
732 732 if not self.ui.configbool('phases', 'publish', True):
733 733 return True
734 734 # if publishing we can't copy if there is filtered content
735 735 return not self.filtered('visible').changelog.filteredrevs
736 736
737 737 def join(self, f):
738 738 return os.path.join(self.path, f)
739 739
740 740 def wjoin(self, f):
741 741 return os.path.join(self.root, f)
742 742
743 743 def file(self, f):
744 744 if f[0] == '/':
745 745 f = f[1:]
746 746 return filelog.filelog(self.sopener, f)
747 747
748 748 def changectx(self, changeid):
749 749 return self[changeid]
750 750
751 751 def parents(self, changeid=None):
752 752 '''get list of changectxs for parents of changeid'''
753 753 return self[changeid].parents()
754 754
755 755 def setparents(self, p1, p2=nullid):
756 756 copies = self.dirstate.setparents(p1, p2)
757 757 pctx = self[p1]
758 758 if copies:
759 759 # Adjust copy records, the dirstate cannot do it, it
760 760 # requires access to parents manifests. Preserve them
761 761 # only for entries added to first parent.
762 762 for f in copies:
763 763 if f not in pctx and copies[f] in pctx:
764 764 self.dirstate.copy(copies[f], f)
765 765 if p2 == nullid:
766 766 for f, s in sorted(self.dirstate.copies().items()):
767 767 if f not in pctx and s not in pctx:
768 768 self.dirstate.copy(None, f)
769 769
770 770 def filectx(self, path, changeid=None, fileid=None):
771 771 """changeid can be a changeset revision, node, or tag.
772 772 fileid can be a file revision or node."""
773 773 return context.filectx(self, path, changeid, fileid)
774 774
775 775 def getcwd(self):
776 776 return self.dirstate.getcwd()
777 777
778 778 def pathto(self, f, cwd=None):
779 779 return self.dirstate.pathto(f, cwd)
780 780
781 781 def wfile(self, f, mode='r'):
782 782 return self.wopener(f, mode)
783 783
784 784 def _link(self, f):
785 785 return self.wvfs.islink(f)
786 786
787 787 def _loadfilter(self, filter):
788 788 if filter not in self.filterpats:
789 789 l = []
790 790 for pat, cmd in self.ui.configitems(filter):
791 791 if cmd == '!':
792 792 continue
793 793 mf = matchmod.match(self.root, '', [pat])
794 794 fn = None
795 795 params = cmd
796 796 for name, filterfn in self._datafilters.iteritems():
797 797 if cmd.startswith(name):
798 798 fn = filterfn
799 799 params = cmd[len(name):].lstrip()
800 800 break
801 801 if not fn:
802 802 fn = lambda s, c, **kwargs: util.filter(s, c)
803 803 # Wrap old filters not supporting keyword arguments
804 804 if not inspect.getargspec(fn)[2]:
805 805 oldfn = fn
806 806 fn = lambda s, c, **kwargs: oldfn(s, c)
807 807 l.append((mf, fn, params))
808 808 self.filterpats[filter] = l
809 809 return self.filterpats[filter]
810 810
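# A hedged configuration sketch for the [encode]/[decode] filter patterns
# loaded above (the pattern and command below are illustrative, not
# canonical): a command starting with a name registered through
# adddatafilter() is dispatched to that Python filter; any other command
# is run through util.filter as an external pipe; '!' disables filtering
# for a pattern.
#
#   [encode]
#   **.txt = tr a b   # external pipe filter (illustrative)
#   [decode]
#   **.txt = !        # no decode filter for this pattern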
811 811 def _filter(self, filterpats, filename, data):
812 812 for mf, fn, cmd in filterpats:
813 813 if mf(filename):
814 814 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
815 815 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
816 816 break
817 817
818 818 return data
819 819
820 820 @unfilteredpropertycache
821 821 def _encodefilterpats(self):
822 822 return self._loadfilter('encode')
823 823
824 824 @unfilteredpropertycache
825 825 def _decodefilterpats(self):
826 826 return self._loadfilter('decode')
827 827
828 828 def adddatafilter(self, name, filter):
829 829 self._datafilters[name] = filter
830 830
831 831 def wread(self, filename):
832 832 if self._link(filename):
833 833 data = self.wvfs.readlink(filename)
834 834 else:
835 835 data = self.wopener.read(filename)
836 836 return self._filter(self._encodefilterpats, filename, data)
837 837
838 838 def wwrite(self, filename, data, flags):
839 839 data = self._filter(self._decodefilterpats, filename, data)
840 840 if 'l' in flags:
841 841 self.wopener.symlink(data, filename)
842 842 else:
843 843 self.wopener.write(filename, data)
844 844 if 'x' in flags:
845 845 self.wvfs.setflags(filename, False, True)
846 846
847 847 def wwritedata(self, filename, data):
848 848 return self._filter(self._decodefilterpats, filename, data)
849 849
850 850 def transaction(self, desc, report=None):
851 851 tr = self._transref and self._transref() or None
852 852 if tr and tr.running():
853 853 return tr.nest()
854 854
855 855 # abort here if the journal already exists
856 856 if self.svfs.exists("journal"):
857 857 raise error.RepoError(
858 858 _("abandoned transaction found - run hg recover"))
859 859
860 860 def onclose():
861 861 self.store.write(tr)
862 862
863 863 self._writejournal(desc)
864 864 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
865 865 rp = report and report or self.ui.warn
866 866 tr = transaction.transaction(rp, self.sopener,
867 867 "journal",
868 868 aftertrans(renames),
869 869 self.store.createmode,
870 870 onclose)
871 871 self._transref = weakref.ref(tr)
872 872 return tr
873 873
874 874 def _journalfiles(self):
875 875 return ((self.svfs, 'journal'),
876 876 (self.vfs, 'journal.dirstate'),
877 877 (self.vfs, 'journal.branch'),
878 878 (self.vfs, 'journal.desc'),
879 879 (self.vfs, 'journal.bookmarks'),
880 880 (self.svfs, 'journal.phaseroots'))
881 881
882 882 def undofiles(self):
883 883 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
884 884
885 885 def _writejournal(self, desc):
886 886 self.opener.write("journal.dirstate",
887 887 self.opener.tryread("dirstate"))
888 888 self.opener.write("journal.branch",
889 889 encoding.fromlocal(self.dirstate.branch()))
890 890 self.opener.write("journal.desc",
891 891 "%d\n%s\n" % (len(self), desc))
892 892 self.opener.write("journal.bookmarks",
893 893 self.opener.tryread("bookmarks"))
894 894 self.sopener.write("journal.phaseroots",
895 895 self.sopener.tryread("phaseroots"))
896 896
897 897 def recover(self):
898 898 lock = self.lock()
899 899 try:
900 900 if self.svfs.exists("journal"):
901 901 self.ui.status(_("rolling back interrupted transaction\n"))
902 902 transaction.rollback(self.sopener, "journal",
903 903 self.ui.warn)
904 904 self.invalidate()
905 905 return True
906 906 else:
907 907 self.ui.warn(_("no interrupted transaction available\n"))
908 908 return False
909 909 finally:
910 910 lock.release()
911 911
912 912 def rollback(self, dryrun=False, force=False):
913 913 wlock = lock = None
914 914 try:
915 915 wlock = self.wlock()
916 916 lock = self.lock()
917 917 if self.svfs.exists("undo"):
918 918 return self._rollback(dryrun, force)
919 919 else:
920 920 self.ui.warn(_("no rollback information available\n"))
921 921 return 1
922 922 finally:
923 923 release(lock, wlock)
924 924
925 925 @unfilteredmethod # Until we get smarter cache management
926 926 def _rollback(self, dryrun, force):
927 927 ui = self.ui
928 928 try:
929 929 args = self.opener.read('undo.desc').splitlines()
930 930 (oldlen, desc, detail) = (int(args[0]), args[1], None)
931 931 if len(args) >= 3:
932 932 detail = args[2]
933 933 oldtip = oldlen - 1
934 934
935 935 if detail and ui.verbose:
936 936 msg = (_('repository tip rolled back to revision %s'
937 937 ' (undo %s: %s)\n')
938 938 % (oldtip, desc, detail))
939 939 else:
940 940 msg = (_('repository tip rolled back to revision %s'
941 941 ' (undo %s)\n')
942 942 % (oldtip, desc))
943 943 except IOError:
944 944 msg = _('rolling back unknown transaction\n')
945 945 desc = None
946 946
947 947 if not force and self['.'] != self['tip'] and desc == 'commit':
948 948 raise util.Abort(
949 949 _('rollback of last commit while not checked out '
950 950 'may lose data'), hint=_('use -f to force'))
951 951
952 952 ui.status(msg)
953 953 if dryrun:
954 954 return 0
955 955
956 956 parents = self.dirstate.parents()
957 957 self.destroying()
958 958 transaction.rollback(self.sopener, 'undo', ui.warn)
959 959 if self.vfs.exists('undo.bookmarks'):
960 960 self.vfs.rename('undo.bookmarks', 'bookmarks')
961 961 if self.svfs.exists('undo.phaseroots'):
962 962 self.svfs.rename('undo.phaseroots', 'phaseroots')
963 963 self.invalidate()
964 964
965 965 parentgone = (parents[0] not in self.changelog.nodemap or
966 966 parents[1] not in self.changelog.nodemap)
967 967 if parentgone:
968 968 self.vfs.rename('undo.dirstate', 'dirstate')
969 969 try:
970 970 branch = self.opener.read('undo.branch')
971 971 self.dirstate.setbranch(encoding.tolocal(branch))
972 972 except IOError:
973 973 ui.warn(_('named branch could not be reset: '
974 974 'current branch is still \'%s\'\n')
975 975 % self.dirstate.branch())
976 976
977 977 self.dirstate.invalidate()
978 978 parents = tuple([p.rev() for p in self.parents()])
979 979 if len(parents) > 1:
980 980 ui.status(_('working directory now based on '
981 981 'revisions %d and %d\n') % parents)
982 982 else:
983 983 ui.status(_('working directory now based on '
984 984 'revision %d\n') % parents)
985 985 # TODO: if we know which new heads may result from this rollback, pass
986 986 # them to destroy(), which will prevent the branchhead cache from being
987 987 # invalidated.
988 988 self.destroyed()
989 989 return 0
990 990
991 991 def invalidatecaches(self):
992 992
993 993 if '_tagscache' in vars(self):
994 994 # can't use delattr on proxy
995 995 del self.__dict__['_tagscache']
996 996
997 997 self.unfiltered()._branchcaches.clear()
998 998 self.invalidatevolatilesets()
999 999
1000 1000 def invalidatevolatilesets(self):
1001 1001 self.filteredrevcache.clear()
1002 1002 obsolete.clearobscaches(self)
1003 1003
1004 1004 def invalidatedirstate(self):
1005 1005 '''Invalidates the dirstate, causing the next call to dirstate
1006 1006 to check whether it was modified since the last time it was read,
1007 1007 rereading it if so.
1008 1008
1009 1009 This is different from dirstate.invalidate() in that it doesn't
1010 1010 always reread the dirstate. Use dirstate.invalidate() if you want to
1011 1011 explicitly read the dirstate again (i.e. restoring it to a previous
1012 1012 known good state).'''
1013 1013 if hasunfilteredcache(self, 'dirstate'):
1014 1014 for k in self.dirstate._filecache:
1015 1015 try:
1016 1016 delattr(self.dirstate, k)
1017 1017 except AttributeError:
1018 1018 pass
1019 1019 delattr(self.unfiltered(), 'dirstate')
1020 1020
1021 1021 def invalidate(self):
1022 1022 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1023 1023 for k in self._filecache:
1024 1024 # dirstate is invalidated separately in invalidatedirstate()
1025 1025 if k == 'dirstate':
1026 1026 continue
1027 1027
1028 1028 try:
1029 1029 delattr(unfiltered, k)
1030 1030 except AttributeError:
1031 1031 pass
1032 1032 self.invalidatecaches()
1033 1033 self.store.invalidatecaches()
1034 1034
1035 1035 def invalidateall(self):
1036 1036 '''Fully invalidates both store and non-store parts, causing the
1037 1037 subsequent operation to reread any outside changes.'''
1038 1038 # extension should hook this to invalidate its caches
1039 1039 self.invalidate()
1040 1040 self.invalidatedirstate()
1041 1041
1042 1042 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1043 1043 try:
1044 1044 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1045 1045 except error.LockHeld, inst:
1046 1046 if not wait:
1047 1047 raise
1048 1048 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1049 1049 (desc, inst.locker))
1050 1050 # default to 600 seconds timeout
1051 1051 l = lockmod.lock(vfs, lockname,
1052 1052 int(self.ui.config("ui", "timeout", "600")),
1053 1053 releasefn, desc=desc)
1054 1054 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1055 1055 if acquirefn:
1056 1056 acquirefn()
1057 1057 return l
1058 1058
1059 1059 def _afterlock(self, callback):
1060 1060 """add a callback to the current repository lock.
1061 1061
1062 1062 The callback will be executed on lock release."""
1063 1063 l = self._lockref and self._lockref()
1064 1064 if l:
1065 1065 l.postrelease.append(callback)
1066 1066 else:
1067 1067 callback()
1068 1068
1069 1069 def lock(self, wait=True):
1070 1070 '''Lock the repository store (.hg/store) and return a weak reference
1071 1071 to the lock. Use this before modifying the store (e.g. committing or
1072 1072 stripping). If you are opening a transaction, get a lock as well.'''
1073 1073 l = self._lockref and self._lockref()
1074 1074 if l is not None and l.held:
1075 1075 l.lock()
1076 1076 return l
1077 1077
1078 1078 def unlock():
1079 1079 if hasunfilteredcache(self, '_phasecache'):
1080 1080 self._phasecache.write()
1081 1081 for k, ce in self._filecache.items():
1082 1082 if k == 'dirstate' or k not in self.__dict__:
1083 1083 continue
1084 1084 ce.refresh()
1085 1085
1086 1086 l = self._lock(self.svfs, "lock", wait, unlock,
1087 1087 self.invalidate, _('repository %s') % self.origroot)
1088 1088 self._lockref = weakref.ref(l)
1089 1089 return l
1090 1090
1091 1091 def wlock(self, wait=True):
1092 1092 '''Lock the non-store parts of the repository (everything under
1093 1093 .hg except .hg/store) and return a weak reference to the lock.
1094 1094 Use this before modifying files in .hg.'''
1095 1095 l = self._wlockref and self._wlockref()
1096 1096 if l is not None and l.held:
1097 1097 l.lock()
1098 1098 return l
1099 1099
1100 1100 def unlock():
1101 1101 self.dirstate.write()
1102 1102 self._filecache['dirstate'].refresh()
1103 1103
1104 1104 l = self._lock(self.vfs, "wlock", wait, unlock,
1105 1105 self.invalidatedirstate, _('working directory of %s') %
1106 1106 self.origroot)
1107 1107 self._wlockref = weakref.ref(l)
1108 1108 return l
1109 1109
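# A hedged usage note: when both locks are needed, wlock() is taken before
# lock() (as rollback() above does), keeping lock ordering consistent.
# A minimal sketch of that pattern:
#
#   wlock = repo.wlock()
#   lock = repo.lock()
#   try:
#       pass  # modify the store and the working copy here
#   finally:
#       release(lock, wlock)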
1110 1110 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1111 1111 """
1112 1112 commit an individual file as part of a larger transaction
1113 1113 """
1114 1114
1115 1115 fname = fctx.path()
1116 1116 text = fctx.data()
1117 1117 flog = self.file(fname)
1118 1118 fparent1 = manifest1.get(fname, nullid)
1119 1119 fparent2 = fparent2o = manifest2.get(fname, nullid)
1120 1120
1121 1121 meta = {}
1122 1122 copy = fctx.renamed()
1123 1123 if copy and copy[0] != fname:
1124 1124 # Mark the new revision of this file as a copy of another
1125 1125 # file. This copy data will effectively act as a parent
1126 1126 # of this new revision. If this is a merge, the first
1127 1127 # parent will be the nullid (meaning "look up the copy data")
1128 1128 # and the second one will be the other parent. For example:
1129 1129 #
1130 1130 # 0 --- 1 --- 3 rev1 changes file foo
1131 1131 # \ / rev2 renames foo to bar and changes it
1132 1132 # \- 2 -/ rev3 should have bar with all changes and
1133 1133 # should record that bar descends from
1134 1134 # bar in rev2 and foo in rev1
1135 1135 #
1136 1136 # this allows this merge to succeed:
1137 1137 #
1138 1138 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1139 1139 # \ / merging rev3 and rev4 should use bar@rev2
1140 1140 # \- 2 --- 4 as the merge base
1141 1141 #
1142 1142
1143 1143 cfname = copy[0]
1144 1144 crev = manifest1.get(cfname)
1145 1145 newfparent = fparent2
1146 1146
1147 1147 if manifest2: # branch merge
1148 1148 if fparent2 == nullid or crev is None: # copied on remote side
1149 1149 if cfname in manifest2:
1150 1150 crev = manifest2[cfname]
1151 1151 newfparent = fparent1
1152 1152
1153 1153 # find source in nearest ancestor if we've lost track
1154 1154 if not crev:
1155 1155 self.ui.debug(" %s: searching for copy revision for %s\n" %
1156 1156 (fname, cfname))
1157 1157 for ancestor in self[None].ancestors():
1158 1158 if cfname in ancestor:
1159 1159 crev = ancestor[cfname].filenode()
1160 1160 break
1161 1161
1162 1162 if crev:
1163 1163 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1164 1164 meta["copy"] = cfname
1165 1165 meta["copyrev"] = hex(crev)
1166 1166 fparent1, fparent2 = nullid, newfparent
1167 1167 else:
1168 1168 self.ui.warn(_("warning: can't find ancestor for '%s' "
1169 1169 "copied from '%s'!\n") % (fname, cfname))
1170 1170
1171 1171 elif fparent1 == nullid:
1172 1172 fparent1, fparent2 = fparent2, nullid
1173 1173 elif fparent2 != nullid:
1174 1174 # is one parent an ancestor of the other?
1175 1175 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1176 1176 if fparent1 in fparentancestors:
1177 1177 fparent1, fparent2 = fparent2, nullid
1178 1178 elif fparent2 in fparentancestors:
1179 1179 fparent2 = nullid
1180 1180
1181 1181 # is the file changed?
1182 1182 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1183 1183 changelist.append(fname)
1184 1184 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1185 1185
1186 1186 # are just the flags changed during merge?
1187 1187 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1188 1188 changelist.append(fname)
1189 1189
1190 1190 return fparent1
1191 1191
1192 1192 @unfilteredmethod
1193 1193 def commit(self, text="", user=None, date=None, match=None, force=False,
1194 1194 editor=False, extra={}):
1195 1195 """Add a new revision to current repository.
1196 1196
1197 1197 Revision information is gathered from the working directory,
1198 1198 match can be used to filter the committed files. If editor is
1199 1199 supplied, it is called to get a commit message.
1200 1200 """
1201 1201
1202 1202 def fail(f, msg):
1203 1203 raise util.Abort('%s: %s' % (f, msg))
1204 1204
1205 1205 if not match:
1206 1206 match = matchmod.always(self.root, '')
1207 1207
1208 1208 if not force:
1209 1209 vdirs = []
1210 1210 match.explicitdir = vdirs.append
1211 1211 match.bad = fail
1212 1212
1213 1213 wlock = self.wlock()
1214 1214 try:
1215 1215 wctx = self[None]
1216 1216 merge = len(wctx.parents()) > 1
1217 1217
1218 1218 if (not force and merge and match and
1219 1219 (match.files() or match.anypats())):
1220 1220 raise util.Abort(_('cannot partially commit a merge '
1221 1221 '(do not specify files or patterns)'))
1222 1222
1223 1223 changes = self.status(match=match, clean=force)
1224 1224 if force:
1225 1225 changes[0].extend(changes[6]) # mq may commit unchanged files
1226 1226
1227 1227 # check subrepos
1228 1228 subs = []
1229 1229 commitsubs = set()
1230 1230 newstate = wctx.substate.copy()
1231 1231 # only manage subrepos and .hgsubstate if .hgsub is present
1232 1232 if '.hgsub' in wctx:
1233 1233 # we'll decide whether to track this ourselves, thanks
1234 1234 for c in changes[:3]:
1235 1235 if '.hgsubstate' in c:
1236 1236 c.remove('.hgsubstate')
1237 1237
1238 1238 # compare current state to last committed state
1239 1239 # build new substate based on last committed state
1240 1240 oldstate = wctx.p1().substate
1241 1241 for s in sorted(newstate.keys()):
1242 1242 if not match(s):
1243 1243 # ignore working copy, use old state if present
1244 1244 if s in oldstate:
1245 1245 newstate[s] = oldstate[s]
1246 1246 continue
1247 1247 if not force:
1248 1248 raise util.Abort(
1249 1249 _("commit with new subrepo %s excluded") % s)
1250 1250 if wctx.sub(s).dirty(True):
1251 1251 if not self.ui.configbool('ui', 'commitsubrepos'):
1252 1252 raise util.Abort(
1253 1253 _("uncommitted changes in subrepo %s") % s,
1254 1254 hint=_("use --subrepos for recursive commit"))
1255 1255 subs.append(s)
1256 1256 commitsubs.add(s)
1257 1257 else:
1258 1258 bs = wctx.sub(s).basestate()
1259 1259 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1260 1260 if oldstate.get(s, (None, None, None))[1] != bs:
1261 1261 subs.append(s)
1262 1262
1263 1263 # check for removed subrepos
1264 1264 for p in wctx.parents():
1265 1265 r = [s for s in p.substate if s not in newstate]
1266 1266 subs += [s for s in r if match(s)]
1267 1267 if subs:
1268 1268 if (not match('.hgsub') and
1269 1269 '.hgsub' in (wctx.modified() + wctx.added())):
1270 1270 raise util.Abort(
1271 1271 _("can't commit subrepos without .hgsub"))
1272 1272 changes[0].insert(0, '.hgsubstate')
1273 1273
1274 1274 elif '.hgsub' in changes[2]:
1275 1275 # clean up .hgsubstate when .hgsub is removed
1276 1276 if ('.hgsubstate' in wctx and
1277 1277 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1278 1278 changes[2].insert(0, '.hgsubstate')
1279 1279
1280 1280 # make sure all explicit patterns are matched
1281 1281 if not force and match.files():
1282 1282 matched = set(changes[0] + changes[1] + changes[2])
1283 1283
1284 1284 for f in match.files():
1285 1285 f = self.dirstate.normalize(f)
1286 1286 if f == '.' or f in matched or f in wctx.substate:
1287 1287 continue
1288 1288 if f in changes[3]: # missing
1289 1289 fail(f, _('file not found!'))
1290 1290 if f in vdirs: # visited directory
1291 1291 d = f + '/'
1292 1292 for mf in matched:
1293 1293 if mf.startswith(d):
1294 1294 break
1295 1295 else:
1296 1296 fail(f, _("no match under directory!"))
1297 1297 elif f not in self.dirstate:
1298 1298 fail(f, _("file not tracked!"))
1299 1299
1300 1300 cctx = context.workingctx(self, text, user, date, extra, changes)
1301 1301
1302 1302 if (not force and not extra.get("close") and not merge
1303 1303 and not cctx.files()
1304 1304 and wctx.branch() == wctx.p1().branch()):
1305 1305 return None
1306 1306
1307 1307 if merge and cctx.deleted():
1308 1308 raise util.Abort(_("cannot commit merge with missing files"))
1309 1309
1310 1310 ms = mergemod.mergestate(self)
1311 1311 for f in changes[0]:
1312 1312 if f in ms and ms[f] == 'u':
1313 1313 raise util.Abort(_("unresolved merge conflicts "
1314 1314 "(see hg help resolve)"))
1315 1315
1316 1316 if editor:
1317 1317 cctx._text = editor(self, cctx, subs)
1318 1318 edited = (text != cctx._text)
1319 1319
1320 1320 # Save commit message in case this transaction gets rolled back
1321 1321 # (e.g. by a pretxncommit hook). Leave the content alone on
1322 1322 # the assumption that the user will use the same editor again.
1323 1323 msgfn = self.savecommitmessage(cctx._text)
1324 1324
1325 1325 # commit subs and write new state
1326 1326 if subs:
1327 1327 for s in sorted(commitsubs):
1328 1328 sub = wctx.sub(s)
1329 1329 self.ui.status(_('committing subrepository %s\n') %
1330 1330 subrepo.subrelpath(sub))
1331 1331 sr = sub.commit(cctx._text, user, date)
1332 1332 newstate[s] = (newstate[s][0], sr)
1333 1333 subrepo.writestate(self, newstate)
1334 1334
1335 1335 p1, p2 = self.dirstate.parents()
1336 1336 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1337 1337 try:
1338 1338 self.hook("precommit", throw=True, parent1=hookp1,
1339 1339 parent2=hookp2)
1340 1340 ret = self.commitctx(cctx, True)
1341 1341 except: # re-raises
1342 1342 if edited:
1343 1343 self.ui.write(
1344 1344 _('note: commit message saved in %s\n') % msgfn)
1345 1345 raise
1346 1346
1347 1347 # update bookmarks, dirstate and mergestate
1348 1348 bookmarks.update(self, [p1, p2], ret)
1349 1349 cctx.markcommitted(ret)
1350 1350 ms.reset()
1351 1351 finally:
1352 1352 wlock.release()
1353 1353
1354 1354 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1355 1355 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1356 1356 self._afterlock(commithook)
1357 1357 return ret
1358 1358
1359 1359 @unfilteredmethod
1360 1360 def commitctx(self, ctx, error=False):
1361 1361 """Add a new revision to current repository.
1362 1362 Revision information is passed via the context argument.
1363 1363 """
1364 1364
1365 1365 tr = lock = None
1366 1366 removed = list(ctx.removed())
1367 1367 p1, p2 = ctx.p1(), ctx.p2()
1368 1368 user = ctx.user()
1369 1369
1370 1370 lock = self.lock()
1371 1371 try:
1372 1372 tr = self.transaction("commit")
1373 1373 trp = weakref.proxy(tr)
1374 1374
1375 1375 if ctx.files():
1376 1376 m1 = p1.manifest().copy()
1377 1377 m2 = p2.manifest()
1378 1378
1379 1379 # check in files
1380 1380 new = {}
1381 1381 changed = []
1382 1382 linkrev = len(self)
1383 1383 for f in sorted(ctx.modified() + ctx.added()):
1384 1384 self.ui.note(f + "\n")
1385 1385 try:
1386 1386 fctx = ctx[f]
1387 1387 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1388 1388 changed)
1389 1389 m1.set(f, fctx.flags())
1390 1390 except OSError, inst:
1391 1391 self.ui.warn(_("trouble committing %s!\n") % f)
1392 1392 raise
1393 1393 except IOError, inst:
1394 1394 errcode = getattr(inst, 'errno', errno.ENOENT)
1395 1395 if error or errcode and errcode != errno.ENOENT:
1396 1396 self.ui.warn(_("trouble committing %s!\n") % f)
1397 1397 raise
1398 1398 else:
1399 1399 removed.append(f)
1400 1400
1401 1401 # update manifest
1402 1402 m1.update(new)
1403 1403 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1404 1404 drop = [f for f in removed if f in m1]
1405 1405 for f in drop:
1406 1406 del m1[f]
1407 1407 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1408 1408 p2.manifestnode(), (new, drop))
1409 1409 files = changed + removed
1410 1410 else:
1411 1411 mn = p1.manifestnode()
1412 1412 files = []
1413 1413
1414 1414 # update changelog
1415 1415 self.changelog.delayupdate()
1416 1416 n = self.changelog.add(mn, files, ctx.description(),
1417 1417 trp, p1.node(), p2.node(),
1418 1418 user, ctx.date(), ctx.extra().copy())
1419 1419 p = lambda: self.changelog.writepending() and self.root or ""
1420 1420 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1421 1421 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1422 1422 parent2=xp2, pending=p)
1423 1423 self.changelog.finalize(trp)
1424 1424 # set the new commit in its proper phase
1425 1425 targetphase = subrepo.newcommitphase(self.ui, ctx)
1426 1426 if targetphase:
1427 1427 # retracting the boundary does not alter parent changesets.
1428 1428 # if a parent has a higher phase, the resulting phase will
1429 1429 # be compliant anyway
1430 1430 #
1431 1431 # if minimal phase was 0 we don't need to retract anything
1432 1432 phases.retractboundary(self, targetphase, [n])
1433 1433 tr.close()
1434 1434 branchmap.updatecache(self.filtered('served'))
1435 1435 return n
1436 1436 finally:
1437 1437 if tr:
1438 1438 tr.release()
1439 1439 lock.release()
1440 1440
1441 1441 @unfilteredmethod
1442 1442 def destroying(self):
1443 1443 '''Inform the repository that nodes are about to be destroyed.
1444 1444 Intended for use by strip and rollback, so there's a common
1445 1445 place for anything that has to be done before destroying history.
1446 1446
1447 1447 This is mostly useful for saving state that is in memory and waiting
1448 1448 to be flushed when the current lock is released. Because a call to
1449 1449 destroyed is imminent, the repo will be invalidated, causing those
1450 1450 changes to either stay in memory (waiting for the next unlock) or
1451 1451 vanish completely.
1452 1452 '''
1453 1453 # When using the same lock to commit and strip, the phasecache is left
1454 1454 # dirty after committing. Then when we strip, the repo is invalidated,
1455 1455 # causing those changes to disappear.
1456 1456 if '_phasecache' in vars(self):
1457 1457 self._phasecache.write()
1458 1458
1459 1459 @unfilteredmethod
1460 1460 def destroyed(self):
1461 1461 '''Inform the repository that nodes have been destroyed.
1462 1462 Intended for use by strip and rollback, so there's a common
1463 1463 place for anything that has to be done after destroying history.
1464 1464 '''
1465 1465 # When one tries to:
1466 1466 # 1) destroy nodes thus calling this method (e.g. strip)
1467 1467 # 2) use phasecache somewhere (e.g. commit)
1468 1468 #
1469 1469 # then 2) will fail because the phasecache contains nodes that were
1470 1470 # removed. We can either remove phasecache from the filecache,
1471 1471 # causing it to reload next time it is accessed, or simply filter
1472 1472 # the removed nodes now and write the updated cache.
1473 1473 self._phasecache.filterunknown(self)
1474 1474 self._phasecache.write()
1475 1475
1476 1476 # update the 'served' branch cache to help read-only server processes
1477 1477 # Thanks to branchcache collaboration this is done from the nearest
1478 1478 # filtered subset and it is expected to be fast.
1479 1479 branchmap.updatecache(self.filtered('served'))
1480 1480
1481 1481 # Ensure the persistent tag cache is updated. Doing it now
1482 1482 # means that the tag cache only has to worry about destroyed
1483 1483 # heads immediately after a strip/rollback. That in turn
1484 1484 # guarantees that "cachetip == currenttip" (comparing both rev
1485 1485 # and node) always means no nodes have been added or destroyed.
1486 1486
1487 1487 # XXX this is suboptimal when qrefresh'ing: we strip the current
1488 1488 # head, refresh the tag cache, then immediately add a new head.
1489 1489 # But I think doing it this way is necessary for the "instant
1490 1490 # tag cache retrieval" case to work.
1491 1491 self.invalidate()
1492 1492
1493 1493 def walk(self, match, node=None):
1494 1494 '''
1495 1495 walk recursively through the directory tree or a given
1496 1496 changeset, finding all files matched by the match
1497 1497 function
1498 1498 '''
1499 1499 return self[node].walk(match)
1500 1500
1501 1501 def status(self, node1='.', node2=None, match=None,
1502 1502 ignored=False, clean=False, unknown=False,
1503 1503 listsubrepos=False):
1504 1504 """return status of files between two nodes or node and working
1505 1505 directory.
1506 1506
1507 1507 If node1 is None, use the first dirstate parent instead.
1508 1508 If node2 is None, compare node1 with working directory.
1509 1509 """
1510 1510
1511 1511 def mfmatches(ctx):
1512 1512 mf = ctx.manifest().copy()
1513 1513 if match.always():
1514 1514 return mf
1515 1515 for fn in mf.keys():
1516 1516 if not match(fn):
1517 1517 del mf[fn]
1518 1518 return mf
1519 1519
1520 1520 ctx1 = self[node1]
1521 1521 ctx2 = self[node2]
1522 1522
1523 1523 working = ctx2.rev() is None
1524 1524 parentworking = working and ctx1 == self['.']
1525 1525 match = match or matchmod.always(self.root, self.getcwd())
1526 1526 listignored, listclean, listunknown = ignored, clean, unknown
1527 1527
1528 1528 # load earliest manifest first for caching reasons
1529 1529 if not working and ctx2.rev() < ctx1.rev():
1530 1530 ctx2.manifest()
1531 1531
1532 1532 if not parentworking:
1533 1533 def bad(f, msg):
1534 1534 # 'f' may be a directory pattern from 'match.files()',
1535 1535 # so 'f not in ctx1' is not enough
1536 1536 if f not in ctx1 and f not in ctx1.dirs():
1537 1537 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1538 1538 match.bad = bad
1539 1539
1540 1540 if working: # we need to scan the working dir
1541 1541 subrepos = []
1542 1542 if '.hgsub' in self.dirstate:
1543 1543 subrepos = sorted(ctx2.substate)
1544 1544 s = self.dirstate.status(match, subrepos, listignored,
1545 1545 listclean, listunknown)
1546 1546 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1547 1547
1548 1548 # check for any possibly clean files
1549 1549 if parentworking and cmp:
1550 1550 fixup = []
1551 1551 # do a full compare of any files that might have changed
1552 1552 for f in sorted(cmp):
1553 1553 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1554 1554 or ctx1[f].cmp(ctx2[f])):
1555 1555 modified.append(f)
1556 1556 else:
1557 1557 fixup.append(f)
1558 1558
1559 1559 # update dirstate for files that are actually clean
1560 1560 if fixup:
1561 1561 if listclean:
1562 1562 clean += fixup
1563 1563
1564 1564 try:
1565 1565 # updating the dirstate is optional
1566 1566 # so we don't wait on the lock
1567 1567 wlock = self.wlock(False)
1568 1568 try:
1569 1569 for f in fixup:
1570 1570 self.dirstate.normal(f)
1571 1571 finally:
1572 1572 wlock.release()
1573 1573 except error.LockError:
1574 1574 pass
1575 1575
1576 1576 if not parentworking:
1577 1577 mf1 = mfmatches(ctx1)
1578 1578 if working:
1579 1579 # we are comparing working dir against non-parent
1580 1580 # generate a pseudo-manifest for the working dir
1581 1581 mf2 = mfmatches(self['.'])
1582 1582 for f in cmp + modified + added:
1583 1583 mf2[f] = None
1584 1584 mf2.set(f, ctx2.flags(f))
1585 1585 for f in removed:
1586 1586 if f in mf2:
1587 1587 del mf2[f]
1588 1588 else:
1589 1589 # we are comparing two revisions
1590 1590 deleted, unknown, ignored = [], [], []
1591 1591 mf2 = mfmatches(ctx2)
1592 1592
1593 1593 modified, added, clean = [], [], []
1594 1594 withflags = mf1.withflags() | mf2.withflags()
1595 1595 for fn, mf2node in mf2.iteritems():
1596 1596 if fn in mf1:
1597 1597 if (fn not in deleted and
1598 1598 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1599 1599 (mf1[fn] != mf2node and
1600 1600 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1601 1601 modified.append(fn)
1602 1602 elif listclean:
1603 1603 clean.append(fn)
1604 1604 del mf1[fn]
1605 1605 elif fn not in deleted:
1606 1606 added.append(fn)
1607 1607 removed = mf1.keys()
1608 1608
1609 1609 if working and modified and not self.dirstate._checklink:
1610 1610 # Symlink placeholders may get non-symlink-like contents
1611 1611 # via user error or dereferencing by NFS or Samba servers,
1612 1612 # so we filter out any placeholders that don't look like a
1613 1613 # symlink
1614 1614 sane = []
1615 1615 for f in modified:
1616 1616 if ctx2.flags(f) == 'l':
1617 1617 d = ctx2[f].data()
1618 1618 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1619 1619 self.ui.debug('ignoring suspect symlink placeholder'
1620 1620 ' "%s"\n' % f)
1621 1621 continue
1622 1622 sane.append(f)
1623 1623 modified = sane
1624 1624
1625 1625 r = modified, added, removed, deleted, unknown, ignored, clean
1626 1626
1627 1627 if listsubrepos:
1628 1628 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1629 1629 if working:
1630 1630 rev2 = None
1631 1631 else:
1632 1632 rev2 = ctx2.substate[subpath][1]
1633 1633 try:
1634 1634 submatch = matchmod.narrowmatcher(subpath, match)
1635 1635 s = sub.status(rev2, match=submatch, ignored=listignored,
1636 1636 clean=listclean, unknown=listunknown,
1637 1637 listsubrepos=True)
1638 1638 for rfiles, sfiles in zip(r, s):
1639 1639 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1640 1640 except error.LookupError:
1641 1641 self.ui.status(_("skipping missing subrepository: %s\n")
1642 1642 % subpath)
1643 1643
1644 1644 for l in r:
1645 1645 l.sort()
1646 1646 return r
1647 1647
1648 1648 def heads(self, start=None):
1649 1649 heads = self.changelog.heads(start)
1650 1650 # sort the output in rev descending order
1651 1651 return sorted(heads, key=self.changelog.rev, reverse=True)
1652 1652
1653 1653 def branchheads(self, branch=None, start=None, closed=False):
1654 1654 '''return a (possibly filtered) list of heads for the given branch
1655 1655
1656 1656 Heads are returned in topological order, from newest to oldest.
1657 1657 If branch is None, use the dirstate branch.
1658 1658 If start is not None, return only heads reachable from start.
1659 1659 If closed is True, return heads that are marked as closed as well.
1660 1660 '''
1661 1661 if branch is None:
1662 1662 branch = self[None].branch()
1663 1663 branches = self.branchmap()
1664 1664 if branch not in branches:
1665 1665 return []
1666 1666 # the cache returns heads ordered lowest to highest
1667 1667 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1668 1668 if start is not None:
1669 1669 # filter out the heads that cannot be reached from startrev
1670 1670 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1671 1671 bheads = [h for h in bheads if h in fbheads]
1672 1672 return bheads
1673 1673
1674 1674 def branches(self, nodes):
1675 1675 if not nodes:
1676 1676 nodes = [self.changelog.tip()]
1677 1677 b = []
1678 1678 for n in nodes:
1679 1679 t = n
1680 1680 while True:
1681 1681 p = self.changelog.parents(n)
1682 1682 if p[1] != nullid or p[0] == nullid:
1683 1683 b.append((t, n, p[0], p[1]))
1684 1684 break
1685 1685 n = p[0]
1686 1686 return b
1687 1687
1688 1688 def between(self, pairs):
1689 1689 r = []
1690 1690
1691 1691 for top, bottom in pairs:
1692 1692 n, l, i = top, [], 0
1693 1693 f = 1
1694 1694
1695 1695 while n != bottom and n != nullid:
1696 1696 p = self.changelog.parents(n)[0]
1697 1697 if i == f:
1698 1698 l.append(n)
1699 1699 f = f * 2
1700 1700 n = p
1701 1701 i += 1
1702 1702
1703 1703 r.append(l)
1704 1704
1705 1705 return r
1706 1706
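# A hedged illustration of between() above: for each (top, bottom) pair it
# walks first parents from top toward bottom and records the nodes whose
# distance from top is 1, 2, 4, 8, ..., giving the wire protocol a
# logarithmic sample of the chain instead of every node. For a linear
# chain top -> a -> b -> c -> d -> ... it would return [a, b, d, h, ...].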
1707 1707 def pull(self, remote, heads=None, force=False):
1708 1708 return exchange.pull(self, remote, heads, force)
1709 1709
1710 1710 def checkpush(self, pushop):
1711 1711 """Extensions can override this function if additional checks have
1712 1712 to be performed before pushing, or call it if they override push
1713 1713 command.
1714 1714 """
1715 1715 pass
1716 1716
1717 1717 @unfilteredpropertycache
1718 1718 def prepushoutgoinghooks(self):
1719 1719 """Return util.hooks consists of "(repo, remote, outgoing)"
1720 1720 functions, which are called before pushing changesets.
1721 1721 """
1722 1722 return util.hooks()
1723 1723
1724 1724 def push(self, remote, force=False, revs=None, newbranch=False):
1725 1725 return exchange.push(self, remote, force, revs, newbranch)
1726 1726
1727 1727 def stream_in(self, remote, requirements):
1728 1728 lock = self.lock()
1729 1729 try:
1730 1730 # Save remote branchmap. We will use it later
1731 1731 # to speed up branchcache creation
1732 1732 rbranchmap = None
1733 1733 if remote.capable("branchmap"):
1734 1734 rbranchmap = remote.branchmap()
1735 1735
1736 1736 fp = remote.stream_out()
1737 1737 l = fp.readline()
1738 1738 try:
1739 1739 resp = int(l)
1740 1740 except ValueError:
1741 1741 raise error.ResponseError(
1742 1742 _('unexpected response from remote server:'), l)
1743 1743 if resp == 1:
1744 1744 raise util.Abort(_('operation forbidden by server'))
1745 1745 elif resp == 2:
1746 1746 raise util.Abort(_('locking the remote repository failed'))
1747 1747 elif resp != 0:
1748 1748 raise util.Abort(_('the server sent an unknown error code'))
1749 1749 self.ui.status(_('streaming all changes\n'))
1750 1750 l = fp.readline()
1751 1751 try:
1752 1752 total_files, total_bytes = map(int, l.split(' ', 1))
1753 1753 except (ValueError, TypeError):
1754 1754 raise error.ResponseError(
1755 1755 _('unexpected response from remote server:'), l)
1756 1756 self.ui.status(_('%d files to transfer, %s of data\n') %
1757 1757 (total_files, util.bytecount(total_bytes)))
1758 1758 handled_bytes = 0
1759 1759 self.ui.progress(_('clone'), 0, total=total_bytes)
1760 1760 start = time.time()
1761 1761
1762 1762 tr = self.transaction(_('clone'))
1763 1763 try:
1764 1764 for i in xrange(total_files):
1765 1765 # XXX doesn't support '\n' or '\r' in filenames
1766 1766 l = fp.readline()
1767 1767 try:
1768 1768 name, size = l.split('\0', 1)
1769 1769 size = int(size)
1770 1770 except (ValueError, TypeError):
1771 1771 raise error.ResponseError(
1772 1772 _('unexpected response from remote server:'), l)
1773 1773 if self.ui.debugflag:
1774 1774 self.ui.debug('adding %s (%s)\n' %
1775 1775 (name, util.bytecount(size)))
1776 1776 # for backwards compat, name was partially encoded
1777 1777 ofp = self.sopener(store.decodedir(name), 'w')
1778 1778 for chunk in util.filechunkiter(fp, limit=size):
1779 1779 handled_bytes += len(chunk)
1780 1780 self.ui.progress(_('clone'), handled_bytes,
1781 1781 total=total_bytes)
1782 1782 ofp.write(chunk)
1783 1783 ofp.close()
1784 1784 tr.close()
1785 1785 finally:
1786 1786 tr.release()
1787 1787
1788 1788 # Writing straight to files circumvented the in-memory caches
1789 1789 self.invalidate()
1790 1790
1791 1791 elapsed = time.time() - start
1792 1792 if elapsed <= 0:
1793 1793 elapsed = 0.001
1794 1794 self.ui.progress(_('clone'), None)
1795 1795 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1796 1796 (util.bytecount(total_bytes), elapsed,
1797 1797 util.bytecount(total_bytes / elapsed)))
1798 1798
1799 1799 # new requirements = old non-format requirements +
1800 1800 # new format-related
1801 1801 # requirements from the streamed-in repository
1802 1802 requirements.update(set(self.requirements) - self.supportedformats)
1803 1803 self._applyrequirements(requirements)
1804 1804 self._writerequirements()
1805 1805
1806 1806 if rbranchmap:
1807 1807 rbheads = []
1808 1808 for bheads in rbranchmap.itervalues():
1809 1809 rbheads.extend(bheads)
1810 1810
1811 1811 if rbheads:
1812 1812 rtiprev = max((int(self.changelog.rev(node))
1813 1813 for node in rbheads))
1814 1814 cache = branchmap.branchcache(rbranchmap,
1815 1815 self[rtiprev].node(),
1816 1816 rtiprev)
1817 1817 # Try to stick it as low as possible;
1818 1818 # filters above 'served' are unlikely to be fetched from a clone
1819 1819 for candidate in ('base', 'immutable', 'served'):
1820 1820 rview = self.filtered(candidate)
1821 1821 if cache.validfor(rview):
1822 1822 self._branchcaches[candidate] = cache
1823 1823 cache.write(rview)
1824 1824 break
1825 1825 self.invalidate()
1826 1826 return len(self.heads()) + 1
1827 1827 finally:
1828 1828 lock.release()
1829 1829
1830 1830 def clone(self, remote, heads=[], stream=False):
1831 1831 '''clone remote repository.
1832 1832
1833 1833 keyword arguments:
1834 1834 heads: list of revs to clone (forces use of pull)
1835 1835 stream: use streaming clone if possible'''
1836 1836
1837 1837 # now, all clients that can request uncompressed clones can
1838 1838 # read repo formats supported by all servers that can serve
1839 1839 # them.
1840 1840
1841 1841 # if revlog format changes, client will have to check version
1842 1842 # and format flags on "stream" capability, and use
1843 1843 # uncompressed only if compatible.
1844 1844
1845 1845 if not stream:
1846 1846 # if the server explicitly prefers to stream (for fast LANs)
1847 1847 stream = remote.capable('stream-preferred')
1848 1848
1849 1849 if stream and not heads:
1850 1850 # 'stream' means remote revlog format is revlogv1 only
1851 1851 if remote.capable('stream'):
1852 1852 return self.stream_in(remote, set(('revlogv1',)))
1853 1853 # otherwise, 'streamreqs' contains the remote revlog format
1854 1854 streamreqs = remote.capable('streamreqs')
1855 1855 if streamreqs:
1856 1856 streamreqs = set(streamreqs.split(','))
1857 1857 # if we support it, stream in and adjust our requirements
1858 1858 if not streamreqs - self.supportedformats:
1859 1859 return self.stream_in(remote, streamreqs)
1860 1860 return self.pull(remote, heads)
1861 1861
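# A hedged summary of the clone() decision above: streaming is used only
# for full clones (no explicit heads), only when the server permits it,
# and only when the client supports every requirement the server would
# stream ('stream' implies plain revlogv1; 'streamreqs' lists the formats
# explicitly); anything else falls back to a normal pull.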
1862 1862 def pushkey(self, namespace, key, old, new):
1863 1863 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1864 1864 old=old, new=new)
1865 1865 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1866 1866 ret = pushkey.push(self, namespace, key, old, new)
1867 1867 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1868 1868 ret=ret)
1869 1869 return ret
1870 1870
1871 1871 def listkeys(self, namespace):
1872 1872 self.hook('prelistkeys', throw=True, namespace=namespace)
1873 1873 self.ui.debug('listing keys for "%s"\n' % namespace)
1874 1874 values = pushkey.list(self, namespace)
1875 1875 self.hook('listkeys', namespace=namespace, values=values)
1876 1876 return values
1877 1877
1878 1878 def debugwireargs(self, one, two, three=None, four=None, five=None):
1879 1879 '''used to test argument passing over the wire'''
1880 1880 return "%s %s %s %s %s" % (one, two, three, four, five)
1881 1881
1882 1882 def savecommitmessage(self, text):
1883 1883 fp = self.opener('last-message.txt', 'wb')
1884 1884 try:
1885 1885 fp.write(text)
1886 1886 finally:
1887 1887 fp.close()
1888 1888 return self.pathto(fp.name[len(self.root) + 1:])
1889 1889
1890 1890 # used to avoid circular references so destructors work
1891 1891 def aftertrans(files):
1892 1892 renamefiles = [tuple(t) for t in files]
1893 1893 def a():
1894 1894 for vfs, src, dest in renamefiles:
1895 1895 try:
1896 1896 vfs.rename(src, dest)
1897 1897 except OSError: # journal file does not yet exist
1898 1898 pass
1899 1899 return a
1900 1900
1901 1901 def undoname(fn):
1902 1902 base, name = os.path.split(fn)
1903 1903 assert name.startswith('journal')
1904 1904 return os.path.join(base, name.replace('journal', 'undo', 1))
1905 1905
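# A grounded example of the journal -> undo renaming that aftertrans() and
# undoname() implement: when a transaction closes, 'journal.dirstate' is
# renamed to 'undo.dirstate', which is what rollback() later restores.
#
#   undoname('journal.dirstate')  # -> 'undo.dirstate'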
1906 1906 def instance(ui, path, create):
1907 1907 return localrepository(ui, util.urllocalpath(path), create)
1908 1908
1909 1909 def islocal(path):
1910 1910 return True
@@ -1,812 +1,814 b''
1 1 # wireproto.py - generic wire protocol support functions
2 2 #
3 3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import urllib, tempfile, os, sys
9 9 from i18n import _
10 10 from node import bin, hex
11 11 import changegroup as changegroupmod, bundle2
12 12 import peer, error, encoding, util, store, exchange
13 13
14 14
15 15 class abstractserverproto(object):
16 16 """abstract class that summarizes the protocol API
17 17
18 18 Used as reference and documentation.
19 19 """
20 20
21 21 def getargs(self, args):
22 22 """return the value for arguments in <args>
23 23
24 24 returns a list of values (same order as <args>)"""
25 25 raise NotImplementedError()
26 26
27 27 def getfile(self, fp):
28 28 """write the whole content of a file into a file like object
29 29
30 30 The file is in the form::
31 31
32 32 (<chunk-size>\n<chunk>)+0\n
33 33
34 34 The chunk size is the ASCII representation of the integer.
35 35 """
36 36 raise NotImplementedError()
37 37
38 38 def redirect(self):
39 39 """may setup interception for stdout and stderr
40 40
41 41 See also the `restore` method."""
42 42 raise NotImplementedError()
43 43
44 44 # If the `redirect` function does install interception, the `restore`
45 45 # function MUST be defined. If interception is not used, this function
46 46 # MUST NOT be defined.
47 47 #
48 48 # left commented here on purpose
49 49 #
50 50 #def restore(self):
51 51 # """reinstall previous stdout and stderr and return intercepted stdout
52 52 # """
53 53 # raise NotImplementedError()
54 54
55 55 def groupchunks(self, cg):
56 56 """return 4096 chunks from a changegroup object
57 57
58 58 Some protocols may have compressed the contents."""
59 59 raise NotImplementedError()
60 60
61 61 # abstract batching support
62 62
63 63 class future(object):
64 64 '''placeholder for a value to be set later'''
65 65 def set(self, value):
66 66 if util.safehasattr(self, 'value'):
67 67 raise error.RepoError("future is already set")
68 68 self.value = value
69 69
70 70 class batcher(object):
71 71 '''base class for batches of commands submittable in a single request
72 72
73 73 All methods invoked on instances of this class are simply queued and
74 74 return a future for the result. Once you call submit(), all the queued
75 75 calls are performed and the results set in their respective futures.
76 76 '''
77 77 def __init__(self):
78 78 self.calls = []
79 79 def __getattr__(self, name):
80 80 def call(*args, **opts):
81 81 resref = future()
82 82 self.calls.append((name, args, opts, resref,))
83 83 return resref
84 84 return call
85 85 def submit(self):
86 86 pass
87 87
88 88 class localbatch(batcher):
89 89 '''performs the queued calls directly'''
90 90 def __init__(self, local):
91 91 batcher.__init__(self)
92 92 self.local = local
93 93 def submit(self):
94 94 for name, args, opts, resref in self.calls:
95 95 resref.set(getattr(self.local, name)(*args, **opts))
96 96
97 97 class remotebatch(batcher):
98 98 '''batches the queued calls; uses as few roundtrips as possible'''
99 99 def __init__(self, remote):
100 100 '''remote must support _submitbatch(encbatch) and
101 101 _submitone(op, encargs)'''
102 102 batcher.__init__(self)
103 103 self.remote = remote
104 104 def submit(self):
105 105 req, rsp = [], []
106 106 for name, args, opts, resref in self.calls:
107 107 mtd = getattr(self.remote, name)
108 108 batchablefn = getattr(mtd, 'batchable', None)
109 109 if batchablefn is not None:
110 110 batchable = batchablefn(mtd.im_self, *args, **opts)
111 111 encargsorres, encresref = batchable.next()
112 112 if encresref:
113 113 req.append((name, encargsorres,))
114 114 rsp.append((batchable, encresref, resref,))
115 115 else:
116 116 resref.set(encargsorres)
117 117 else:
118 118 if req:
119 119 self._submitreq(req, rsp)
120 120 req, rsp = [], []
121 121 resref.set(mtd(*args, **opts))
122 122 if req:
123 123 self._submitreq(req, rsp)
124 124 def _submitreq(self, req, rsp):
125 125 encresults = self.remote._submitbatch(req)
126 126 for encres, r in zip(encresults, rsp):
127 127 batchable, encresref, resref = r
128 128 encresref.set(encres)
129 129 resref.set(batchable.next())
130 130
131 131 def batchable(f):
132 132 '''annotation for batchable methods
133 133
134 134 Such methods must implement a coroutine as follows:
135 135
136 136 @batchable
137 137 def sample(self, one, two=None):
138 138 # Handle locally computable results first:
139 139 if not one:
140 140 yield "a local result", None
141 141 # Build list of encoded arguments suitable for your wire protocol:
142 142 encargs = [('one', encode(one),), ('two', encode(two),)]
143 143 # Create future for injection of encoded result:
144 144 encresref = future()
145 145 # Return encoded arguments and future:
146 146 yield encargs, encresref
147 147 # Assuming the future to be filled with the result from the batched
148 148 # request now. Decode it:
149 149 yield decode(encresref.value)
150 150
151 151 The decorator returns a function which wraps this coroutine as a plain
152 152 method, but adds the original method as an attribute called "batchable",
153 153 which is used by remotebatch to split the call into separate encoding and
154 154 decoding phases.
155 155 '''
156 156 def plain(*args, **opts):
157 157 batchable = f(*args, **opts)
158 158 encargsorres, encresref = batchable.next()
159 159 if not encresref:
160 160 return encargsorres # a local result in this case
161 161 self = args[0]
162 162 encresref.set(self._submitone(f.func_name, encargsorres))
163 163 return batchable.next()
164 164 setattr(plain, 'batchable', f)
165 165 return plain
166 166
167 167 # list of nodes encoding / decoding
168 168
169 169 def decodelist(l, sep=' '):
170 170 if l:
171 171 return map(bin, l.split(sep))
172 172 return []
173 173
174 174 def encodelist(l, sep=' '):
175 175 return sep.join(map(hex, l))
176 176
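# A grounded note on the node-list helpers above: encodelist() joins the
# hex form of each 20-byte node with the separator and decodelist()
# reverses it, so decodelist(encodelist(nodes)) == nodes for any node
# list.
#
#   encodelist([n1, n2])               # -> '<hex(n1)> <hex(n2)>'
#   decodelist('<hex(n1)> <hex(n2)>')  # -> [n1, n2]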
177 177 # batched call argument encoding
178 178
179 179 def escapearg(plain):
180 180 return (plain
181 181 .replace(':', '::')
182 182 .replace(',', ':,')
183 183 .replace(';', ':;')
184 184 .replace('=', ':='))
185 185
186 186 def unescapearg(escaped):
187 187 return (escaped
188 188 .replace(':=', '=')
189 189 .replace(':;', ';')
190 190 .replace(':,', ',')
191 191 .replace('::', ':'))
192 192
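# A grounded round-trip example for the escaping above; ':' is doubled
# first so the ':x' escape sequences remain unambiguous:
#
#   escapearg('key=a,b;c')        # -> 'key:=a:,b:;c'
#   unescapearg('key:=a:,b:;c')   # -> 'key=a,b;c'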
193 193 # client side
194 194
195 195 class wirepeer(peer.peerrepository):
196 196
197 197 def batch(self):
198 198 return remotebatch(self)
199 199 def _submitbatch(self, req):
200 200 cmds = []
201 201 for op, argsdict in req:
202 202 args = ','.join('%s=%s' % p for p in argsdict.iteritems())
203 203 cmds.append('%s %s' % (op, args))
204 204 rsp = self._call("batch", cmds=';'.join(cmds))
205 205 return rsp.split(';')
206 206 def _submitone(self, op, args):
207 207 return self._call(op, **args)
208 208
209 209 @batchable
210 210 def lookup(self, key):
211 211 self.requirecap('lookup', _('look up remote revision'))
212 212 f = future()
213 213 yield {'key': encoding.fromlocal(key)}, f
214 214 d = f.value
215 215 success, data = d[:-1].split(" ", 1)
216 216 if int(success):
217 217 yield bin(data)
218 218 self._abort(error.RepoError(data))
219 219
220 220 @batchable
221 221 def heads(self):
222 222 f = future()
223 223 yield {}, f
224 224 d = f.value
225 225 try:
226 226 yield decodelist(d[:-1])
227 227 except ValueError:
228 228 self._abort(error.ResponseError(_("unexpected response:"), d))
229 229
230 230 @batchable
231 231 def known(self, nodes):
232 232 f = future()
233 233 yield {'nodes': encodelist(nodes)}, f
234 234 d = f.value
235 235 try:
236 236 yield [bool(int(f)) for f in d]
237 237 except ValueError:
238 238 self._abort(error.ResponseError(_("unexpected response:"), d))
239 239
240 240 @batchable
241 241 def branchmap(self):
242 242 f = future()
243 243 yield {}, f
244 244 d = f.value
245 245 try:
246 246 branchmap = {}
247 247 for branchpart in d.splitlines():
248 248 branchname, branchheads = branchpart.split(' ', 1)
249 249 branchname = encoding.tolocal(urllib.unquote(branchname))
250 250 branchheads = decodelist(branchheads)
251 251 branchmap[branchname] = branchheads
252 252 yield branchmap
253 253 except TypeError:
254 254 self._abort(error.ResponseError(_("unexpected response:"), d))
255 255
256 256 def branches(self, nodes):
257 257 n = encodelist(nodes)
258 258 d = self._call("branches", nodes=n)
259 259 try:
260 260 br = [tuple(decodelist(b)) for b in d.splitlines()]
261 261 return br
262 262 except ValueError:
263 263 self._abort(error.ResponseError(_("unexpected response:"), d))
264 264
265 265 def between(self, pairs):
266 266 batch = 8 # avoid giant requests
267 267 r = []
268 268 for i in xrange(0, len(pairs), batch):
269 269 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
270 270 d = self._call("between", pairs=n)
271 271 try:
272 272 r.extend(l and decodelist(l) or [] for l in d.splitlines())
273 273 except ValueError:
274 274 self._abort(error.ResponseError(_("unexpected response:"), d))
275 275 return r
276 276
277 277 @batchable
278 278 def pushkey(self, namespace, key, old, new):
279 279 if not self.capable('pushkey'):
280 280 yield False, None
281 281 f = future()
282 282 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
283 283 yield {'namespace': encoding.fromlocal(namespace),
284 284 'key': encoding.fromlocal(key),
285 285 'old': encoding.fromlocal(old),
286 286 'new': encoding.fromlocal(new)}, f
287 287 d = f.value
288 288 d, output = d.split('\n', 1)
289 289 try:
290 290 d = bool(int(d))
291 291 except ValueError:
292 292 raise error.ResponseError(
293 293 _('push failed (unexpected response):'), d)
294 294 for l in output.splitlines(True):
295 295 self.ui.status(_('remote: '), l)
296 296 yield d
297 297
298 298 @batchable
299 299 def listkeys(self, namespace):
300 300 if not self.capable('pushkey'):
301 301 yield {}, None
302 302 f = future()
303 303 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
304 304 yield {'namespace': encoding.fromlocal(namespace)}, f
305 305 d = f.value
306 306 r = {}
307 307 for l in d.splitlines():
308 308 k, v = l.split('\t')
309 309 r[encoding.tolocal(k)] = encoding.tolocal(v)
310 310 yield r
311 311
312 312 def stream_out(self):
313 313 return self._callstream('stream_out')
314 314
315 315 def changegroup(self, nodes, kind):
316 316 n = encodelist(nodes)
317 317 f = self._callcompressable("changegroup", roots=n)
318 318 return changegroupmod.unbundle10(f, 'UN')
319 319
320 320 def changegroupsubset(self, bases, heads, kind):
321 321 self.requirecap('changegroupsubset', _('look up remote changes'))
322 322 bases = encodelist(bases)
323 323 heads = encodelist(heads)
324 324 f = self._callcompressable("changegroupsubset",
325 325 bases=bases, heads=heads)
326 326 return changegroupmod.unbundle10(f, 'UN')
327 327
328 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
328 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
329 **kwargs):
329 330 self.requirecap('getbundle', _('look up remote changes'))
330 331 opts = {}
331 332 if heads is not None:
332 333 opts['heads'] = encodelist(heads)
333 334 if common is not None:
334 335 opts['common'] = encodelist(common)
335 336 if bundlecaps is not None:
336 337 opts['bundlecaps'] = ','.join(bundlecaps)
338 opts.update(kwargs)
337 339 f = self._callcompressable("getbundle", **opts)
338 340 if bundlecaps is not None and 'HG2X' in bundlecaps:
339 341 return bundle2.unbundle20(self.ui, f)
340 342 else:
341 343 return changegroupmod.unbundle10(f, 'UN')
342 344
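# A hedged note on the **kwargs pass-through added to getbundle() above:
# extra keyword arguments are merged verbatim into the wire options, so
# new getbundle parameters can flow from the exchange layer to the server
# without touching this method again. The extra names are whatever client
# and server agree on; for example (argument name purely illustrative):
#
#   remote.getbundle('pull', common=common, heads=heads, newarg=value)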
343 345 def unbundle(self, cg, heads, source):
344 346 '''Send cg (a readable file-like object representing the
345 347 changegroup to push, typically a chunkbuffer object) to the
346 348 remote server as a bundle.
347 349
348 350 When pushing a bundle10 stream, return an integer indicating the
349 351 result of the push (see localrepository.addchangegroup()).
350 352
351 353 When pushing a bundle20 stream, return a bundle20 stream.'''
352 354
353 355 if heads != ['force'] and self.capable('unbundlehash'):
354 356 heads = encodelist(['hashed',
355 357 util.sha1(''.join(sorted(heads))).digest()])
356 358 else:
357 359 heads = encodelist(heads)
358 360
359 361 if util.safehasattr(cg, 'deltaheader'):
360 362 # this is a bundle10, do the old style call sequence
361 363 ret, output = self._callpush("unbundle", cg, heads=heads)
362 364 if ret == "":
363 365 raise error.ResponseError(
364 366 _('push failed:'), output)
365 367 try:
366 368 ret = int(ret)
367 369 except ValueError:
368 370 raise error.ResponseError(
369 371 _('push failed (unexpected response):'), ret)
370 372
371 373 for l in output.splitlines(True):
372 374 self.ui.status(_('remote: '), l)
373 375 else:
374 376 # bundle2 push. Send a stream, fetch a stream.
375 377 stream = self._calltwowaystream('unbundle', cg, heads=heads)
376 378 ret = bundle2.unbundle20(self.ui, stream)
377 379 return ret
378 380
379 381 def debugwireargs(self, one, two, three=None, four=None, five=None):
380 382 # don't pass optional arguments left at their default value
381 383 opts = {}
382 384 if three is not None:
383 385 opts['three'] = three
384 386 if four is not None:
385 387 opts['four'] = four
386 388 return self._call('debugwireargs', one=one, two=two, **opts)
387 389
388 390 def _call(self, cmd, **args):
389 391 """execute <cmd> on the server
390 392
391 393 The command is expected to return a simple string.
392 394
393 395 returns the server reply as a string."""
394 396 raise NotImplementedError()
395 397
396 398 def _callstream(self, cmd, **args):
397 399 """execute <cmd> on the server
398 400
399 401 The command is expected to return a stream.
400 402
401 403 returns the server reply as a file like object."""
402 404 raise NotImplementedError()
403 405
404 406 def _callcompressable(self, cmd, **args):
405 407 """execute <cmd> on the server
406 408
407 409 The command is expected to return a stream.
408 410
409 411 The stream may have been compressed in some implementations. This
410 412 function takes care of the decompression. This is the only difference
411 413 with _callstream.
412 414
413 415 returns the server reply as a file like object.
414 416 """
415 417 raise NotImplementedError()
416 418
417 419 def _callpush(self, cmd, fp, **args):
418 420 """execute a <cmd> on server
419 421
420 422 The command is expected to be related to a push. Pushes use a special
421 423 return convention.
422 424
423 425 returns the server reply as a (ret, output) tuple. ret is either
424 426 empty (error) or a stringified int.
425 427 """
426 428 raise NotImplementedError()
427 429
428 430 def _calltwowaystream(self, cmd, fp, **args):
429 431 """execute <cmd> on server
430 432
431 433 The command will send a stream to the server and get a stream in reply.
432 434 """
433 435 raise NotImplementedError()
434 436
435 437 def _abort(self, exception):
436 438 """clearly abort the wire protocol connection and raise the exception
437 439 """
438 440 raise NotImplementedError()
439 441
440 442 # server side
441 443
442 444 # wire protocol command can either return a string or one of these classes.
443 445 class streamres(object):
444 446 """wireproto reply: binary stream
445 447
446 448 The call was successful and the result is a stream.
447 449 Iterate on the `self.gen` attribute to retrieve chunks.
448 450 """
449 451 def __init__(self, gen):
450 452 self.gen = gen
451 453
452 454 class pushres(object):
453 455 """wireproto reply: success with simple integer return
454 456
455 457 The call was successful and returned an integer contained in `self.res`.
456 458 """
457 459 def __init__(self, res):
458 460 self.res = res
459 461
460 462 class pusherr(object):
461 463 """wireproto reply: failure
462 464
463 465 The call failed. The `self.res` attribute contains the error message.
464 466 """
465 467 def __init__(self, res):
466 468 self.res = res
467 469
468 470 class ooberror(object):
469 471 """wireproto reply: failure of a batch of operations
470 472
471 473 Something failed during a batch call. The error message is stored in
472 474 `self.message`.
473 475 """
474 476 def __init__(self, message):
475 477 self.message = message
476 478
477 479 def dispatch(repo, proto, command):
478 480 repo = repo.filtered("served")
479 481 func, spec = commands[command]
480 482 args = proto.getargs(spec)
481 483 return func(repo, proto, *args)
482 484
483 485 def options(cmd, keys, others):
484 486 opts = {}
485 487 for k in keys:
486 488 if k in others:
487 489 opts[k] = others[k]
488 490 del others[k]
489 491 if others:
490 492 sys.stderr.write("abort: %s got unexpected arguments %s\n"
491 493 % (cmd, ",".join(others)))
492 494 return opts
493 495
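# Hypothetical usage of options() above: whitelisted keys are moved into
# the result, leftovers trigger the stderr warning.
#
#     others = {'heads': 'aa bb', 'bogus': '1'}
#     opts = options('getbundle', ['heads', 'common', 'bundlecaps'], others)
#     # opts == {'heads': 'aa bb'}
#     # stderr: "abort: getbundle got unexpected arguments bogus"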
494 496 # list of commands
495 497 commands = {}
496 498
497 499 def wireprotocommand(name, args=''):
498 500 """decorator for wire protocol command"""
499 501 def register(func):
500 502 commands[name] = (func, args)
501 503 return func
502 504 return register
503 505
504 506 @wireprotocommand('batch', 'cmds *')
505 507 def batch(repo, proto, cmds, others):
506 508 repo = repo.filtered("served")
507 509 res = []
508 510 for pair in cmds.split(';'):
509 511 op, args = pair.split(' ', 1)
510 512 vals = {}
511 513 for a in args.split(','):
512 514 if a:
513 515 n, v = a.split('=')
514 516 vals[n] = unescapearg(v)
515 517 func, spec = commands[op]
516 518 if spec:
517 519 keys = spec.split()
518 520 data = {}
519 521 for k in keys:
520 522 if k == '*':
521 523 star = {}
522 524 for key in vals.keys():
523 525 if key not in keys:
524 526 star[key] = vals[key]
525 527 data['*'] = star
526 528 else:
527 529 data[k] = vals[k]
528 530 result = func(repo, proto, *[data[k] for k in keys])
529 531 else:
530 532 result = func(repo, proto)
531 533 if isinstance(result, ooberror):
532 534 return result
533 535 res.append(escapearg(result))
534 536 return ';'.join(res)
535 537
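# Sketch of the framing the batch loop above parses: commands separated
# by ';', each of the form "name arg=val,arg=val" (real payloads escape
# argument values with the escapearg/unescapearg helpers, ignored here):

_payload = 'heads ;known nodes=aa bb'
for _pair in _payload.split(';'):
    _op, _args = _pair.split(' ', 1)
    print(_op, dict(a.split('=') for a in _args.split(',') if a))
# heads {}
# known {'nodes': 'aa bb'}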
536 538 @wireprotocommand('between', 'pairs')
537 539 def between(repo, proto, pairs):
538 540 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
539 541 r = []
540 542 for b in repo.between(pairs):
541 543 r.append(encodelist(b) + "\n")
542 544 return "".join(r)
543 545
544 546 @wireprotocommand('branchmap')
545 547 def branchmap(repo, proto):
546 548 branchmap = repo.branchmap()
547 549 heads = []
548 550 for branch, nodes in branchmap.iteritems():
549 551 branchname = urllib.quote(encoding.fromlocal(branch))
550 552 branchnodes = encodelist(nodes)
551 553 heads.append('%s %s' % (branchname, branchnodes))
552 554 return '\n'.join(heads)
553 555
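# Note: the reply is one line per branch, the URL-quoted branch name
# followed by the space-separated hex heads of that branch. Illustrative
# output with made-up nodes:
#
#     default 1f0dee641bb7258c56bd60e93edfa2405381c41e
#     stable 340e38bdcde4d53b091d4b1d569d2dd2e8466868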
554 556 @wireprotocommand('branches', 'nodes')
555 557 def branches(repo, proto, nodes):
556 558 nodes = decodelist(nodes)
557 559 r = []
558 560 for b in repo.branches(nodes):
559 561 r.append(encodelist(b) + "\n")
560 562 return "".join(r)
561 563
562 564
563 565 wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
564 566 'known', 'getbundle', 'unbundlehash', 'batch']
565 567
566 568 def _capabilities(repo, proto):
567 569 """return a list of capabilities for a repo
568 570
569 571 This function exists to allow extensions to easily wrap capabilities
570 572 computation
571 573
572 574 - returns a list: easy to alter
573 575 - changes made here will be propagated to both the `capabilities` and `hello`
574 576 commands without any other action needed.
575 577 """
576 578 # copy to prevent modification of the global list
577 579 caps = list(wireprotocaps)
578 580 if _allowstream(repo.ui):
579 581 if repo.ui.configbool('server', 'preferuncompressed', False):
580 582 caps.append('stream-preferred')
581 583 requiredformats = repo.requirements & repo.supportedformats
582 584 # if our local revlogs are just revlogv1, add 'stream' cap
583 585 if not requiredformats - set(('revlogv1',)):
584 586 caps.append('stream')
585 587 # otherwise, add 'streamreqs' detailing our local revlog format
586 588 else:
587 589 caps.append('streamreqs=%s' % ','.join(requiredformats))
588 590 if repo.ui.configbool('experimental', 'bundle2-exp', False):
589 591 capsblob = bundle2.encodecaps(repo.bundle2caps)
590 592 caps.append('bundle2-exp=' + urllib.quote(capsblob))
591 593 caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
592 594 caps.append('httpheader=1024')
593 595 return caps
594 596
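# Note: joined with spaces (see capabilities() below), the list built
# above yields a single line. An illustrative, non-exhaustive example for
# a plain revlogv1 repository:
#
#     lookup changegroupsubset branchmap pushkey known getbundle
#     unbundlehash batch stream unbundle=HG10GZ,HG10BZ,HG10UN
#     httpheader=1024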
595 597 # If you are writing an extension and consider wrapping this function, wrap
596 598 # `_capabilities` instead.
597 599 @wireprotocommand('capabilities')
598 600 def capabilities(repo, proto):
599 601 return ' '.join(_capabilities(repo, proto))
600 602
601 603 @wireprotocommand('changegroup', 'roots')
602 604 def changegroup(repo, proto, roots):
603 605 nodes = decodelist(roots)
604 606 cg = changegroupmod.changegroup(repo, nodes, 'serve')
605 607 return streamres(proto.groupchunks(cg))
606 608
607 609 @wireprotocommand('changegroupsubset', 'bases heads')
608 610 def changegroupsubset(repo, proto, bases, heads):
609 611 bases = decodelist(bases)
610 612 heads = decodelist(heads)
611 613 cg = changegroupmod.changegroupsubset(repo, bases, heads, 'serve')
612 614 return streamres(proto.groupchunks(cg))
613 615
614 616 @wireprotocommand('debugwireargs', 'one two *')
615 617 def debugwireargs(repo, proto, one, two, others):
616 618 # only accept optional args from the known set
617 619 opts = options('debugwireargs', ['three', 'four'], others)
618 620 return repo.debugwireargs(one, two, **opts)
619 621
620 622 @wireprotocommand('getbundle', '*')
621 623 def getbundle(repo, proto, others):
622 624 opts = options('getbundle', ['heads', 'common', 'bundlecaps'], others)
623 625 for k, v in opts.iteritems():
624 626 if k in ('heads', 'common'):
625 627 opts[k] = decodelist(v)
626 628 elif k == 'bundlecaps':
627 629 opts[k] = set(v.split(','))
628 630 cg = exchange.getbundle(repo, 'serve', **opts)
629 631 return streamres(proto.groupchunks(cg))
630 632
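# Sketch (not Mercurial code): because the command spec is just '*',
# every option the client sent lands in `others`; only 'heads', 'common'
# and 'bundlecaps' are decoded, and anything else reaches
# exchange.getbundle() unchanged, which is what the new client-side
# **kwargs relies on. Here v.split() stands in for decodelist, which
# also hex-decodes the nodes:

def _sketch_decodeopts(others):
    opts = dict(others)
    for k, v in list(opts.items()):
        if k in ('heads', 'common'):
            opts[k] = v.split()            # stand-in for decodelist(v)
        elif k == 'bundlecaps':
            opts[k] = set(v.split(','))
    return opts

# _sketch_decodeopts({'heads': 'aa bb', 'cg': '0'})
# -> {'heads': ['aa', 'bb'], 'cg': '0'}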
631 633 @wireprotocommand('heads')
632 634 def heads(repo, proto):
633 635 h = repo.heads()
634 636 return encodelist(h) + "\n"
635 637
636 638 @wireprotocommand('hello')
637 639 def hello(repo, proto):
638 640 '''the hello command returns a set of lines describing various
639 641 interesting things about the server, in an RFC822-like format.
640 642 Currently the only one defined is "capabilities", which
641 643 consists of a line in the form:
642 644
643 645 capabilities: space separated list of tokens
644 646 '''
645 647 return "capabilities: %s\n" % (capabilities(repo, proto))
646 648
647 649 @wireprotocommand('listkeys', 'namespace')
648 650 def listkeys(repo, proto, namespace):
649 651 d = repo.listkeys(encoding.tolocal(namespace)).items()
650 652 t = '\n'.join(['%s\t%s' % (encoding.fromlocal(k), encoding.fromlocal(v))
651 653 for k, v in d])
652 654 return t
653 655
654 656 @wireprotocommand('lookup', 'key')
655 657 def lookup(repo, proto, key):
656 658 try:
657 659 k = encoding.tolocal(key)
658 660 c = repo[k]
659 661 r = c.hex()
660 662 success = 1
661 663 except Exception, inst:
662 664 r = str(inst)
663 665 success = 0
664 666 return "%s %s\n" % (success, r)
665 667
666 668 @wireprotocommand('known', 'nodes *')
667 669 def known(repo, proto, nodes, others):
668 670 return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
669 671
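# Note: the reply is positional, one character per queried node, in
# request order:
#
#     repo.known([n1, n2, n3]) -> [True, False, True]  =>  reply "101"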
670 672 @wireprotocommand('pushkey', 'namespace key old new')
671 673 def pushkey(repo, proto, namespace, key, old, new):
672 674 # compatibility with pre-1.8 clients which were accidentally
673 675 # sending raw binary nodes rather than utf-8-encoded hex
674 676 if len(new) == 20 and new.encode('string-escape') != new:
675 677 # looks like it could be a binary node
676 678 try:
677 679 new.decode('utf-8')
678 680 new = encoding.tolocal(new) # but cleanly decodes as UTF-8
679 681 except UnicodeDecodeError:
680 682 pass # binary, leave unmodified
681 683 else:
682 684 new = encoding.tolocal(new) # normal path
683 685
684 686 if util.safehasattr(proto, 'restore'):
685 687
686 688 proto.redirect()
687 689
688 690 try:
689 691 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
690 692 encoding.tolocal(old), new) or False
691 693 except util.Abort:
692 694 r = False
693 695
694 696 output = proto.restore()
695 697
696 698 return '%s\n%s' % (int(r), output)
697 699
698 700 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
699 701 encoding.tolocal(old), new)
700 702 return '%s\n' % int(r)
701 703
702 704 def _allowstream(ui):
703 705 return ui.configbool('server', 'uncompressed', True, untrusted=True)
704 706
705 707 def _walkstreamfiles(repo):
706 708 # this is its own function so extensions can override it
707 709 return repo.store.walk()
708 710
709 711 @wireprotocommand('stream_out')
710 712 def stream(repo, proto):
711 713 '''If the server supports streaming clone, it advertises the "stream"
712 714 capability with a value representing the version and flags of the repo
713 715 it is serving. Client checks to see if it understands the format.
714 716
715 717 The format is simple: the server writes out a line with the number
716 718 of files, then the total number of bytes to be transferred (separated
717 719 by a space). Then, for each file, the server first writes the filename
718 720 and file size (separated by the null character), then the file contents.
719 721 '''
720 722
721 723 if not _allowstream(repo.ui):
722 724 return '1\n'
723 725
724 726 entries = []
725 727 total_bytes = 0
726 728 try:
727 729 # get consistent snapshot of repo, lock during scan
728 730 lock = repo.lock()
729 731 try:
730 732 repo.ui.debug('scanning\n')
731 733 for name, ename, size in _walkstreamfiles(repo):
732 734 if size:
733 735 entries.append((name, size))
734 736 total_bytes += size
735 737 finally:
736 738 lock.release()
737 739 except error.LockError:
738 740 return '2\n' # error: 2
739 741
740 742 def streamer(repo, entries, total):
741 743 '''stream out all metadata files in repository.'''
742 744 yield '0\n' # success
743 745 repo.ui.debug('%d files, %d bytes to transfer\n' %
744 746 (len(entries), total_bytes))
745 747 yield '%d %d\n' % (len(entries), total_bytes)
746 748
747 749 sopener = repo.sopener
748 750 oldaudit = sopener.mustaudit
749 751 debugflag = repo.ui.debugflag
750 752 sopener.mustaudit = False
751 753
752 754 try:
753 755 for name, size in entries:
754 756 if debugflag:
755 757 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
756 758 # partially encode name over the wire for backwards compat
757 759 yield '%s\0%d\n' % (store.encodedir(name), size)
758 760 if size <= 65536:
759 761 fp = sopener(name)
760 762 try:
761 763 data = fp.read(size)
762 764 finally:
763 765 fp.close()
764 766 yield data
765 767 else:
766 768 for chunk in util.filechunkiter(sopener(name), limit=size):
767 769 yield chunk
768 770 # replace with "finally:" when support for python 2.4 has been dropped
769 771 except Exception:
770 772 sopener.mustaudit = oldaudit
771 773 raise
772 774 sopener.mustaudit = oldaudit
773 775
774 776 return streamres(streamer(repo, entries, total_bytes))
775 777
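# Sketch: a hypothetical client-side parser for the format described
# above. One status line, one "filecount totalbytes" line, then per file
# a "name\0size" header followed by exactly size bytes (ignoring the
# store.encodedir() name encoding):

def _sketch_parsestream(fh):
    status = fh.readline().strip()
    if status != b'0':          # '1': streaming disabled, '2': lock error
        raise IOError('stream_out refused: %r' % status)
    filecount, totalbytes = map(int, fh.readline().split())
    for _ in range(filecount):
        name, size = fh.readline().rstrip(b'\n').split(b'\0')
        yield name, fh.read(int(size))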
776 778 @wireprotocommand('unbundle', 'heads')
777 779 def unbundle(repo, proto, heads):
778 780 their_heads = decodelist(heads)
779 781
780 782 try:
781 783 proto.redirect()
782 784
783 785 exchange.check_heads(repo, their_heads, 'preparing changes')
784 786
785 787 # write bundle data to temporary file because it can be big
786 788 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
787 789 fp = os.fdopen(fd, 'wb+')
788 790 r = 0
789 791 try:
790 792 proto.getfile(fp)
791 793 fp.seek(0)
792 794 gen = exchange.readbundle(repo.ui, fp, None)
793 795 r = exchange.unbundle(repo, gen, their_heads, 'serve',
794 796 proto._client())
795 797 if util.safehasattr(r, 'addpart'):
796 798 # The return looks streamable; we are in the bundle2 case and
797 799 # should return a stream.
798 800 return streamres(r.getchunks())
799 801 return pushres(r)
800 802
801 803 finally:
802 804 fp.close()
803 805 os.unlink(tempname)
804 806 except util.Abort, inst:
805 807 # The old code we moved used sys.stderr directly.
806 808 # We did not change it to minimise code change.
807 809 # This needs to be moved to something proper.
808 810 # Feel free to do it.
809 811 sys.stderr.write("abort: %s\n" % inst)
810 812 return pushres(0)
811 813 except exchange.PushRaced, exc:
812 814 return pusherr(str(exc))
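# Sketch: the spool-to-disk pattern above keeps arbitrarily large
# incoming bundles off the heap. Write to a temp file, rewind, then
# parse. Reduced to its essentials (standalone, placeholder payload):

import os, tempfile

_fd, _tempname = tempfile.mkstemp(prefix='hg-unbundle-')
_fp = os.fdopen(_fd, 'wb+')
try:
    _fp.write(b'...bundle bytes read from the wire...')
    _fp.seek(0)                 # rewind before parsing
    _data = _fp.read()
finally:
    _fp.close()
    os.unlink(_tempname)        # temp file never outlives the request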