localrepo: introduce "prepushoutgoinghooks" to extend outgoing check easily...
FUJIWARA Katsunori
r21043:6c383c87 default
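With this change, exchange.push() invokes every function registered in repo.prepushoutgoinghooks with (repo, remote, outgoing) once _pushcheckoutgoing() has confirmed there is something to push, so an extension can inspect or veto the outgoing changesets without wrapping push() itself. The following is a minimal sketch of such an extension; the extension name and the rule it enforces are invented for illustration, and the .add() call assumes the standard util.hooks registration interface.

# Sketch only, not part of this changeset. Hypothetical extension that
# refuses to push changesets whose description starts with "WIP".
from mercurial import util
from mercurial.i18n import _

def _rejectwip(repo, remote, outgoing):
    # called by exchange.push() after the outgoing check succeeds and
    # before the changegroup is built and sent
    for node in outgoing.missing:
        ctx = repo[node]
        if ctx.description().startswith('WIP'):
            raise util.Abort(_('refusing to push work-in-progress '
                               'changeset %s') % ctx)

def reposetup(ui, repo):
    if repo.local():
        repo.prepushoutgoinghooks.add('rejectwip', _rejectwip)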
mercurial/exchange.py
@@ -1,644 +1,647 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno
11 11 import util, scmutil, changegroup, base85
12 12 import discovery, phases, obsolete, bookmarks, bundle2
13 13
14 14
15 15 class pushoperation(object):
16 16 """A object that represent a single push operation
17 17
18 18 It purpose is to carry push related state and very common operation.
19 19
20 20 A new should be created at the beginning of each push and discarded
21 21 afterward.
22 22 """
23 23
24 24 def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
25 25 # repo we push from
26 26 self.repo = repo
27 27 self.ui = repo.ui
28 28 # repo we push to
29 29 self.remote = remote
30 30 # force option provided
31 31 self.force = force
32 32 # revs to be pushed (None is "all")
33 33 self.revs = revs
34 34 # allow push of new branch
35 35 self.newbranch = newbranch
36 36 # did a local lock get acquired?
37 37 self.locallocked = None
38 38 # Integer version of the push result
39 39 # - None means nothing to push
40 40 # - 0 means HTTP error
41 41 # - 1 means we pushed and remote head count is unchanged *or*
42 42 # we have outgoing changesets but refused to push
43 43 # - other values as described by addchangegroup()
44 44 self.ret = None
45 45 # discover.outgoing object (contains common and outgoing data)
46 46 self.outgoing = None
47 47 # all remote heads before the push
48 48 self.remoteheads = None
49 49 # testable as a boolean indicating if any nodes are missing locally.
50 50 self.incoming = None
51 51 # set of all heads common after changeset bundle push
52 52 self.commonheads = None
53 53
54 54 def push(repo, remote, force=False, revs=None, newbranch=False):
55 55 '''Push outgoing changesets (limited by revs) from a local
56 56 repository to remote. Return an integer:
57 57 - None means nothing to push
58 58 - 0 means HTTP error
59 59 - 1 means we pushed and remote head count is unchanged *or*
60 60 we have outgoing changesets but refused to push
61 61 - other values as described by addchangegroup()
62 62 '''
63 63 pushop = pushoperation(repo, remote, force, revs, newbranch)
64 64 if pushop.remote.local():
65 65 missing = (set(pushop.repo.requirements)
66 66 - pushop.remote.local().supported)
67 67 if missing:
68 68 msg = _("required features are not"
69 69 " supported in the destination:"
70 70 " %s") % (', '.join(sorted(missing)))
71 71 raise util.Abort(msg)
72 72
73 73 # there are two ways to push to remote repo:
74 74 #
75 75 # addchangegroup assumes local user can lock remote
76 76 # repo (local filesystem, old ssh servers).
77 77 #
78 78 # unbundle assumes local user cannot lock remote repo (new ssh
79 79 # servers, http servers).
80 80
81 81 if not pushop.remote.canpush():
82 82 raise util.Abort(_("destination does not support push"))
83 83 # get local lock as we might write phase data
84 84 locallock = None
85 85 try:
86 86 locallock = pushop.repo.lock()
87 87 pushop.locallocked = True
88 88 except IOError, err:
89 89 pushop.locallocked = False
90 90 if err.errno != errno.EACCES:
91 91 raise
92 92 # source repo cannot be locked.
93 93 # We do not abort the push, but just disable the local phase
94 94 # synchronisation.
95 95 msg = 'cannot lock source repository: %s\n' % err
96 96 pushop.ui.debug(msg)
97 97 try:
98 98 pushop.repo.checkpush(pushop)
99 99 lock = None
100 100 unbundle = pushop.remote.capable('unbundle')
101 101 if not unbundle:
102 102 lock = pushop.remote.lock()
103 103 try:
104 104 _pushdiscovery(pushop)
105 105 if _pushcheckoutgoing(pushop):
106 pushop.repo.prepushoutgoinghooks(pushop.repo,
107 pushop.remote,
108 pushop.outgoing)
106 109 _pushchangeset(pushop)
107 110 _pushcomputecommonheads(pushop)
108 111 _pushsyncphase(pushop)
109 112 _pushobsolete(pushop)
110 113 finally:
111 114 if lock is not None:
112 115 lock.release()
113 116 finally:
114 117 if locallock is not None:
115 118 locallock.release()
116 119
117 120 _pushbookmark(pushop)
118 121 return pushop.ret
119 122
120 123 def _pushdiscovery(pushop):
121 124 # discovery
122 125 unfi = pushop.repo.unfiltered()
123 126 fci = discovery.findcommonincoming
124 127 commoninc = fci(unfi, pushop.remote, force=pushop.force)
125 128 common, inc, remoteheads = commoninc
126 129 fco = discovery.findcommonoutgoing
127 130 outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
128 131 commoninc=commoninc, force=pushop.force)
129 132 pushop.outgoing = outgoing
130 133 pushop.remoteheads = remoteheads
131 134 pushop.incoming = inc
132 135
133 136 def _pushcheckoutgoing(pushop):
134 137 outgoing = pushop.outgoing
135 138 unfi = pushop.repo.unfiltered()
136 139 if not outgoing.missing:
137 140 # nothing to push
138 141 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
139 142 return False
140 143 # something to push
141 144 if not pushop.force:
142 145 # if repo.obsstore == False --> no obsolete
143 146 # then, save the iteration
144 147 if unfi.obsstore:
145 148 # this message are here for 80 char limit reason
146 149 mso = _("push includes obsolete changeset: %s!")
147 150 mst = "push includes %s changeset: %s!"
148 151 # plain versions for i18n tool to detect them
149 152 _("push includes unstable changeset: %s!")
150 153 _("push includes bumped changeset: %s!")
151 154 _("push includes divergent changeset: %s!")
152 155 # If we are to push if there is at least one
153 156 # obsolete or unstable changeset in missing, at
154 157 # least one of the missinghead will be obsolete or
155 158 # unstable. So checking heads only is ok
156 159 for node in outgoing.missingheads:
157 160 ctx = unfi[node]
158 161 if ctx.obsolete():
159 162 raise util.Abort(mso % ctx)
160 163 elif ctx.troubled():
161 164 raise util.Abort(_(mst)
162 165 % (ctx.troubles()[0],
163 166 ctx))
164 167 newbm = pushop.ui.configlist('bookmarks', 'pushing')
165 168 discovery.checkheads(unfi, pushop.remote, outgoing,
166 169 pushop.remoteheads,
167 170 pushop.newbranch,
168 171 bool(pushop.incoming),
169 172 newbm)
170 173 return True
171 174
172 175 def _pushchangeset(pushop):
173 176 """Make the actual push of changeset bundle to remote repo"""
174 177 outgoing = pushop.outgoing
175 178 unbundle = pushop.remote.capable('unbundle')
176 179 # TODO: get bundlecaps from remote
177 180 bundlecaps = None
178 181 # create a changegroup from local
179 182 if pushop.revs is None and not (outgoing.excluded
180 183 or pushop.repo.changelog.filteredrevs):
181 184 # push everything,
182 185 # use the fast path, no race possible on push
183 186 bundler = changegroup.bundle10(pushop.repo, bundlecaps)
184 187 cg = changegroup.getsubset(pushop.repo,
185 188 outgoing,
186 189 bundler,
187 190 'push',
188 191 fastpath=True)
189 192 else:
190 193 cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
191 194 bundlecaps)
192 195
193 196 # apply changegroup to remote
194 197 if unbundle:
195 198 # local repo finds heads on server, finds out what
196 199 # revs it must push. once revs transferred, if server
197 200 # finds it has different heads (someone else won
198 201 # commit/push race), server aborts.
199 202 if pushop.force:
200 203 remoteheads = ['force']
201 204 else:
202 205 remoteheads = pushop.remoteheads
203 206 # ssh: return remote's addchangegroup()
204 207 # http: return remote's addchangegroup() or 0 for error
205 208 pushop.ret = pushop.remote.unbundle(cg, remoteheads,
206 209 'push')
207 210 else:
208 211 # we return an integer indicating remote head count
209 212 # change
210 213 pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
211 214
212 215 def _pushcomputecommonheads(pushop):
213 216 unfi = pushop.repo.unfiltered()
214 217 if pushop.ret:
215 218 # push succeed, synchronize target of the push
216 219 cheads = pushop.outgoing.missingheads
217 220 elif pushop.revs is None:
218 221 # All out push fails. synchronize all common
219 222 cheads = pushop.outgoing.commonheads
220 223 else:
221 224 # I want cheads = heads(::missingheads and ::commonheads)
222 225 # (missingheads is revs with secret changeset filtered out)
223 226 #
224 227 # This can be expressed as:
225 228 # cheads = ( (missingheads and ::commonheads)
226 229 # + (commonheads and ::missingheads))"
227 230 # )
228 231 #
229 232 # while trying to push we already computed the following:
230 233 # common = (::commonheads)
231 234 # missing = ((commonheads::missingheads) - commonheads)
232 235 #
233 236 # We can pick:
234 237 # * missingheads part of common (::commonheads)
235 238 common = set(pushop.outgoing.common)
236 239 nm = pushop.repo.changelog.nodemap
237 240 cheads = [node for node in pushop.revs if nm[node] in common]
238 241 # and
239 242 # * commonheads parents on missing
240 243 revset = unfi.set('%ln and parents(roots(%ln))',
241 244 pushop.outgoing.commonheads,
242 245 pushop.outgoing.missing)
243 246 cheads.extend(c.node() for c in revset)
244 247 pushop.commonheads = cheads
245 248
246 249 def _pushsyncphase(pushop):
247 250 """synchronise phase information locally and remotely"""
248 251 unfi = pushop.repo.unfiltered()
249 252 cheads = pushop.commonheads
250 253 if pushop.ret:
251 254 # push succeed, synchronize target of the push
252 255 cheads = pushop.outgoing.missingheads
253 256 elif pushop.revs is None:
254 257 # All out push fails. synchronize all common
255 258 cheads = pushop.outgoing.commonheads
256 259 else:
257 260 # I want cheads = heads(::missingheads and ::commonheads)
258 261 # (missingheads is revs with secret changeset filtered out)
259 262 #
260 263 # This can be expressed as:
261 264 # cheads = ( (missingheads and ::commonheads)
262 265 # + (commonheads and ::missingheads))"
263 266 # )
264 267 #
265 268 # while trying to push we already computed the following:
266 269 # common = (::commonheads)
267 270 # missing = ((commonheads::missingheads) - commonheads)
268 271 #
269 272 # We can pick:
270 273 # * missingheads part of common (::commonheads)
271 274 common = set(pushop.outgoing.common)
272 275 nm = pushop.repo.changelog.nodemap
273 276 cheads = [node for node in pushop.revs if nm[node] in common]
274 277 # and
275 278 # * commonheads parents on missing
276 279 revset = unfi.set('%ln and parents(roots(%ln))',
277 280 pushop.outgoing.commonheads,
278 281 pushop.outgoing.missing)
279 282 cheads.extend(c.node() for c in revset)
280 283 pushop.commonheads = cheads
281 284 # even when we don't push, exchanging phase data is useful
282 285 remotephases = pushop.remote.listkeys('phases')
283 286 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
284 287 and remotephases # server supports phases
285 288 and pushop.ret is None # nothing was pushed
286 289 and remotephases.get('publishing', False)):
287 290 # When:
288 291 # - this is a subrepo push
289 292 # - and remote support phase
290 293 # - and no changeset was pushed
291 294 # - and remote is publishing
292 295 # We may be in issue 3871 case!
293 296 # We drop the possible phase synchronisation done by
294 297 # courtesy to publish changesets possibly locally draft
295 298 # on the remote.
296 299 remotephases = {'publishing': 'True'}
297 300 if not remotephases: # old server or public only reply from non-publishing
298 301 _localphasemove(pushop, cheads)
299 302 # don't push any phase data as there is nothing to push
300 303 else:
301 304 ana = phases.analyzeremotephases(pushop.repo, cheads,
302 305 remotephases)
303 306 pheads, droots = ana
304 307 ### Apply remote phase on local
305 308 if remotephases.get('publishing', False):
306 309 _localphasemove(pushop, cheads)
307 310 else: # publish = False
308 311 _localphasemove(pushop, pheads)
309 312 _localphasemove(pushop, cheads, phases.draft)
310 313 ### Apply local phase on remote
311 314
312 315 # Get the list of all revs draft on remote by public here.
313 316 # XXX Beware that revset break if droots is not strictly
314 317 # XXX root we may want to ensure it is but it is costly
315 318 outdated = unfi.set('heads((%ln::%ln) and public())',
316 319 droots, cheads)
317 320 for newremotehead in outdated:
318 321 r = pushop.remote.pushkey('phases',
319 322 newremotehead.hex(),
320 323 str(phases.draft),
321 324 str(phases.public))
322 325 if not r:
323 326 pushop.ui.warn(_('updating %s to public failed!\n')
324 327 % newremotehead)
325 328
326 329 def _localphasemove(pushop, nodes, phase=phases.public):
327 330 """move <nodes> to <phase> in the local source repo"""
328 331 if pushop.locallocked:
329 332 phases.advanceboundary(pushop.repo, phase, nodes)
330 333 else:
331 334 # repo is not locked, do not change any phases!
332 335 # Informs the user that phases should have been moved when
333 336 # applicable.
334 337 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
335 338 phasestr = phases.phasenames[phase]
336 339 if actualmoves:
337 340 pushop.ui.status(_('cannot lock source repo, skipping '
338 341 'local %s phase update\n') % phasestr)
339 342
340 343 def _pushobsolete(pushop):
341 344 """utility function to push obsolete markers to a remote"""
342 345 pushop.ui.debug('try to push obsolete markers to remote\n')
343 346 repo = pushop.repo
344 347 remote = pushop.remote
345 348 if (obsolete._enabled and repo.obsstore and
346 349 'obsolete' in remote.listkeys('namespaces')):
347 350 rslts = []
348 351 remotedata = repo.listkeys('obsolete')
349 352 for key in sorted(remotedata, reverse=True):
350 353 # reverse sort to ensure we end with dump0
351 354 data = remotedata[key]
352 355 rslts.append(remote.pushkey('obsolete', key, '', data))
353 356 if [r for r in rslts if not r]:
354 357 msg = _('failed to push some obsolete markers!\n')
355 358 repo.ui.warn(msg)
356 359
357 360 def _pushbookmark(pushop):
358 361 """Update bookmark position on remote"""
359 362 ui = pushop.ui
360 363 repo = pushop.repo.unfiltered()
361 364 remote = pushop.remote
362 365 ui.debug("checking for updated bookmarks\n")
363 366 revnums = map(repo.changelog.rev, pushop.revs or [])
364 367 ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
365 368 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
366 369 ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
367 370 srchex=hex)
368 371
369 372 for b, scid, dcid in advsrc:
370 373 if ancestors and repo[scid].rev() not in ancestors:
371 374 continue
372 375 if remote.pushkey('bookmarks', b, dcid, scid):
373 376 ui.status(_("updating bookmark %s\n") % b)
374 377 else:
375 378 ui.warn(_('updating bookmark %s failed!\n') % b)
376 379
377 380 class pulloperation(object):
378 381 """A object that represent a single pull operation
379 382
380 383 It purpose is to carry push related state and very common operation.
381 384
382 385 A new should be created at the beginning of each pull and discarded
383 386 afterward.
384 387 """
385 388
386 389 def __init__(self, repo, remote, heads=None, force=False):
387 390 # repo we pull into
388 391 self.repo = repo
389 392 # repo we pull from
390 393 self.remote = remote
391 394 # revision we try to pull (None is "all")
392 395 self.heads = heads
393 396 # do we force pull?
394 397 self.force = force
395 398 # the name the pull transaction
396 399 self._trname = 'pull\n' + util.hidepassword(remote.url())
397 400 # hold the transaction once created
398 401 self._tr = None
399 402 # set of common changeset between local and remote before pull
400 403 self.common = None
401 404 # set of pulled head
402 405 self.rheads = None
403 406 # list of missing changeset to fetch remotely
404 407 self.fetch = None
405 408 # result of changegroup pulling (used as return code by pull)
406 409 self.cgresult = None
407 410 # list of step remaining todo (related to future bundle2 usage)
408 411 self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])
409 412
410 413 @util.propertycache
411 414 def pulledsubset(self):
412 415 """heads of the set of changeset target by the pull"""
413 416 # compute target subset
414 417 if self.heads is None:
415 418 # We pulled every thing possible
416 419 # sync on everything common
417 420 c = set(self.common)
418 421 ret = list(self.common)
419 422 for n in self.rheads:
420 423 if n not in c:
421 424 ret.append(n)
422 425 return ret
423 426 else:
424 427 # We pulled a specific subset
425 428 # sync on this subset
426 429 return self.heads
427 430
428 431 def gettransaction(self):
429 432 """get appropriate pull transaction, creating it if needed"""
430 433 if self._tr is None:
431 434 self._tr = self.repo.transaction(self._trname)
432 435 return self._tr
433 436
434 437 def closetransaction(self):
435 438 """close transaction if created"""
436 439 if self._tr is not None:
437 440 self._tr.close()
438 441
439 442 def releasetransaction(self):
440 443 """release transaction if created"""
441 444 if self._tr is not None:
442 445 self._tr.release()
443 446
444 447 def pull(repo, remote, heads=None, force=False):
445 448 pullop = pulloperation(repo, remote, heads, force)
446 449 if pullop.remote.local():
447 450 missing = set(pullop.remote.requirements) - pullop.repo.supported
448 451 if missing:
449 452 msg = _("required features are not"
450 453 " supported in the destination:"
451 454 " %s") % (', '.join(sorted(missing)))
452 455 raise util.Abort(msg)
453 456
454 457 lock = pullop.repo.lock()
455 458 try:
456 459 _pulldiscovery(pullop)
457 460 if pullop.remote.capable('bundle2'):
458 461 _pullbundle2(pullop)
459 462 if 'changegroup' in pullop.todosteps:
460 463 _pullchangeset(pullop)
461 464 if 'phases' in pullop.todosteps:
462 465 _pullphase(pullop)
463 466 if 'obsmarkers' in pullop.todosteps:
464 467 _pullobsolete(pullop)
465 468 pullop.closetransaction()
466 469 finally:
467 470 pullop.releasetransaction()
468 471 lock.release()
469 472
470 473 return pullop.cgresult
471 474
472 475 def _pulldiscovery(pullop):
473 476 """discovery phase for the pull
474 477
475 478 Current handle changeset discovery only, will change handle all discovery
476 479 at some point."""
477 480 tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
478 481 pullop.remote,
479 482 heads=pullop.heads,
480 483 force=pullop.force)
481 484 pullop.common, pullop.fetch, pullop.rheads = tmp
482 485
483 486 def _pullbundle2(pullop):
484 487 """pull data using bundle2
485 488
486 489 For now, the only supported data are changegroup."""
487 490 kwargs = {'bundlecaps': set(['HG20'])}
488 491 # pulling changegroup
489 492 pullop.todosteps.remove('changegroup')
490 493 if not pullop.fetch:
491 494 pullop.repo.ui.status(_("no changes found\n"))
492 495 pullop.cgresult = 0
493 496 else:
494 497 kwargs['common'] = pullop.common
495 498 kwargs['heads'] = pullop.heads or pullop.rheads
496 499 if pullop.heads is None and list(pullop.common) == [nullid]:
497 500 pullop.repo.ui.status(_("requesting all changes\n"))
498 501 if kwargs.keys() == ['format']:
499 502 return # nothing to pull
500 503 bundle = pullop.remote.getbundle('pull', **kwargs)
501 504 try:
502 505 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
503 506 except KeyError, exc:
504 507 raise util.Abort('missing support for %s' % exc)
505 508 assert len(op.records['changegroup']) == 1
506 509 pullop.cgresult = op.records['changegroup'][0]['return']
507 510
508 511 def _pullchangeset(pullop):
509 512 """pull changeset from unbundle into the local repo"""
510 513 # We delay the open of the transaction as late as possible so we
511 514 # don't open transaction for nothing or you break future useful
512 515 # rollback call
513 516 pullop.todosteps.remove('changegroup')
514 517 if not pullop.fetch:
515 518 pullop.repo.ui.status(_("no changes found\n"))
516 519 pullop.cgresult = 0
517 520 return
518 521 pullop.gettransaction()
519 522 if pullop.heads is None and list(pullop.common) == [nullid]:
520 523 pullop.repo.ui.status(_("requesting all changes\n"))
521 524 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
522 525 # issue1320, avoid a race if remote changed after discovery
523 526 pullop.heads = pullop.rheads
524 527
525 528 if pullop.remote.capable('getbundle'):
526 529 # TODO: get bundlecaps from remote
527 530 cg = pullop.remote.getbundle('pull', common=pullop.common,
528 531 heads=pullop.heads or pullop.rheads)
529 532 elif pullop.heads is None:
530 533 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
531 534 elif not pullop.remote.capable('changegroupsubset'):
532 535 raise util.Abort(_("partial pull cannot be done because "
533 536 "other repository doesn't support "
534 537 "changegroupsubset."))
535 538 else:
536 539 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
537 540 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
538 541 pullop.remote.url())
539 542
540 543 def _pullphase(pullop):
541 544 # Get remote phases data from remote
542 545 pullop.todosteps.remove('phases')
543 546 remotephases = pullop.remote.listkeys('phases')
544 547 publishing = bool(remotephases.get('publishing', False))
545 548 if remotephases and not publishing:
546 549 # remote is new and unpublishing
547 550 pheads, _dr = phases.analyzeremotephases(pullop.repo,
548 551 pullop.pulledsubset,
549 552 remotephases)
550 553 phases.advanceboundary(pullop.repo, phases.public, pheads)
551 554 phases.advanceboundary(pullop.repo, phases.draft,
552 555 pullop.pulledsubset)
553 556 else:
554 557 # Remote is old or publishing all common changesets
555 558 # should be seen as public
556 559 phases.advanceboundary(pullop.repo, phases.public,
557 560 pullop.pulledsubset)
558 561
559 562 def _pullobsolete(pullop):
560 563 """utility function to pull obsolete markers from a remote
561 564
562 565 The `gettransaction` is function that return the pull transaction, creating
563 566 one if necessary. We return the transaction to inform the calling code that
564 567 a new transaction have been created (when applicable).
565 568
566 569 Exists mostly to allow overriding for experimentation purpose"""
567 570 pullop.todosteps.remove('obsmarkers')
568 571 tr = None
569 572 if obsolete._enabled:
570 573 pullop.repo.ui.debug('fetching remote obsolete markers\n')
571 574 remoteobs = pullop.remote.listkeys('obsolete')
572 575 if 'dump0' in remoteobs:
573 576 tr = pullop.gettransaction()
574 577 for key in sorted(remoteobs, reverse=True):
575 578 if key.startswith('dump'):
576 579 data = base85.b85decode(remoteobs[key])
577 580 pullop.repo.obsstore.mergemarkers(tr, data)
578 581 pullop.repo.invalidatevolatilesets()
579 582 return tr
580 583
581 584 def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
582 585 """return a full bundle (with potentially multiple kind of parts)
583 586
584 587 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
585 588 passed. For now, the bundle can contain only changegroup, but this will
586 589 changes when more part type will be available for bundle2.
587 590
588 591 This is different from changegroup.getbundle that only returns an HG10
589 592 changegroup bundle. They may eventually get reunited in the future when we
590 593 have a clearer idea of the API we what to query different data.
591 594
592 595 The implementation is at a very early stage and will get massive rework
593 596 when the API of bundle is refined.
594 597 """
595 598 # build bundle here.
596 599 cg = changegroup.getbundle(repo, source, heads=heads,
597 600 common=common, bundlecaps=bundlecaps)
598 601 if bundlecaps is None or 'HG20' not in bundlecaps:
599 602 return cg
600 603 # very crude first implementation,
601 604 # the bundle API will change and the generation will be done lazily.
602 605 bundler = bundle2.bundle20(repo.ui)
603 606 def cgchunks(cg=cg):
604 607 yield 'HG10UN'
605 608 for c in cg.getchunks():
606 609 yield c
607 610 part = bundle2.bundlepart('changegroup', data=cgchunks())
608 611 bundler.addpart(part)
609 612 return bundle2.unbundle20(repo.ui, util.chunkbuffer(bundler.getchunks()))
610 613
611 614 class PushRaced(RuntimeError):
612 615 """An exception raised during unbundling that indicate a push race"""
613 616
614 617 def check_heads(repo, their_heads, context):
615 618 """check if the heads of a repo have been modified
616 619
617 620 Used by peer for unbundling.
618 621 """
619 622 heads = repo.heads()
620 623 heads_hash = util.sha1(''.join(sorted(heads))).digest()
621 624 if not (their_heads == ['force'] or their_heads == heads or
622 625 their_heads == ['hashed', heads_hash]):
623 626 # someone else committed/pushed/unbundled while we
624 627 # were transferring data
625 628 raise PushRaced('repository changed while %s - '
626 629 'please try again' % context)
627 630
628 631 def unbundle(repo, cg, heads, source, url):
629 632 """Apply a bundle to a repo.
630 633
631 634 this function makes sure the repo is locked during the application and have
632 635 mechanism to check that no push race occurred between the creation of the
633 636 bundle and its application.
634 637
635 638 If the push was raced as PushRaced exception is raised."""
636 639 r = 0
637 640 lock = repo.lock()
638 641 try:
639 642 check_heads(repo, heads, 'uploading changes')
640 643 # push can proceed
641 644 r = changegroup.addchangegroup(repo, cg, source, url)
642 645 finally:
643 646 lock.release()
644 647 return r
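For orientation, the sketch below drives the push pipeline defined above programmatically, much as commands.push ultimately does; the repository path and destination URL are placeholders, and only exchange.push() and its argument list come from the code in this file.

# Sketch only: assumed driver code, with a placeholder destination URL.
from mercurial import hg, ui as uimod, exchange

myui = uimod.ui()
repo = hg.repository(myui, '.')                          # repo we push from
other = hg.peer(myui, {}, 'http://hg.example.com/repo')  # peer we push to
# Hooks registered in repo.prepushoutgoinghooks run between
# _pushcheckoutgoing() and _pushchangeset().
result = exchange.push(repo, other, force=False, revs=None, newbranch=False)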
mercurial/localrepo.py
@@ -1,1885 +1,1892 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock as lockmod
12 12 import transaction, store, encoding, exchange
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 import branchmap, pathutil
20 20 propertycache = util.propertycache
21 21 filecache = scmutil.filecache
22 22
23 23 class repofilecache(filecache):
24 24 """All filecache usage on repo are done for logic that should be unfiltered
25 25 """
26 26
27 27 def __get__(self, repo, type=None):
28 28 return super(repofilecache, self).__get__(repo.unfiltered(), type)
29 29 def __set__(self, repo, value):
30 30 return super(repofilecache, self).__set__(repo.unfiltered(), value)
31 31 def __delete__(self, repo):
32 32 return super(repofilecache, self).__delete__(repo.unfiltered())
33 33
34 34 class storecache(repofilecache):
35 35 """filecache for files in the store"""
36 36 def join(self, obj, fname):
37 37 return obj.sjoin(fname)
38 38
39 39 class unfilteredpropertycache(propertycache):
40 40 """propertycache that apply to unfiltered repo only"""
41 41
42 42 def __get__(self, repo, type=None):
43 43 unfi = repo.unfiltered()
44 44 if unfi is repo:
45 45 return super(unfilteredpropertycache, self).__get__(unfi)
46 46 return getattr(unfi, self.name)
47 47
48 48 class filteredpropertycache(propertycache):
49 49 """propertycache that must take filtering in account"""
50 50
51 51 def cachevalue(self, obj, value):
52 52 object.__setattr__(obj, self.name, value)
53 53
54 54
55 55 def hasunfilteredcache(repo, name):
56 56 """check if a repo has an unfilteredpropertycache value for <name>"""
57 57 return name in vars(repo.unfiltered())
58 58
59 59 def unfilteredmethod(orig):
60 60 """decorate method that always need to be run on unfiltered version"""
61 61 def wrapper(repo, *args, **kwargs):
62 62 return orig(repo.unfiltered(), *args, **kwargs)
63 63 return wrapper
64 64
65 65 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
66 66 'bundle2', 'unbundle'))
67 67 legacycaps = moderncaps.union(set(['changegroupsubset']))
68 68
69 69 class localpeer(peer.peerrepository):
70 70 '''peer for a local repo; reflects only the most recent API'''
71 71
72 72 def __init__(self, repo, caps=moderncaps):
73 73 peer.peerrepository.__init__(self)
74 74 self._repo = repo.filtered('served')
75 75 self.ui = repo.ui
76 76 self._caps = repo._restrictcapabilities(caps)
77 77 self.requirements = repo.requirements
78 78 self.supportedformats = repo.supportedformats
79 79
80 80 def close(self):
81 81 self._repo.close()
82 82
83 83 def _capabilities(self):
84 84 return self._caps
85 85
86 86 def local(self):
87 87 return self._repo
88 88
89 89 def canpush(self):
90 90 return True
91 91
92 92 def url(self):
93 93 return self._repo.url()
94 94
95 95 def lookup(self, key):
96 96 return self._repo.lookup(key)
97 97
98 98 def branchmap(self):
99 99 return self._repo.branchmap()
100 100
101 101 def heads(self):
102 102 return self._repo.heads()
103 103
104 104 def known(self, nodes):
105 105 return self._repo.known(nodes)
106 106
107 107 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
108 108 format='HG10'):
109 109 return exchange.getbundle(self._repo, source, heads=heads,
110 110 common=common, bundlecaps=bundlecaps)
111 111
112 112 # TODO We might want to move the next two calls into legacypeer and add
113 113 # unbundle instead.
114 114
115 115 def unbundle(self, cg, heads, url):
116 116 """apply a bundle on a repo
117 117
118 118 This function handles the repo locking itself."""
119 119 try:
120 120 return exchange.unbundle(self._repo, cg, heads, 'push', url)
121 121 except exchange.PushRaced, exc:
122 122 raise error.ResponseError(_('push failed:'), exc.message)
123 123
124 124 def lock(self):
125 125 return self._repo.lock()
126 126
127 127 def addchangegroup(self, cg, source, url):
128 128 return changegroup.addchangegroup(self._repo, cg, source, url)
129 129
130 130 def pushkey(self, namespace, key, old, new):
131 131 return self._repo.pushkey(namespace, key, old, new)
132 132
133 133 def listkeys(self, namespace):
134 134 return self._repo.listkeys(namespace)
135 135
136 136 def debugwireargs(self, one, two, three=None, four=None, five=None):
137 137 '''used to test argument passing over the wire'''
138 138 return "%s %s %s %s %s" % (one, two, three, four, five)
139 139
140 140 class locallegacypeer(localpeer):
141 141 '''peer extension which implements legacy methods too; used for tests with
142 142 restricted capabilities'''
143 143
144 144 def __init__(self, repo):
145 145 localpeer.__init__(self, repo, caps=legacycaps)
146 146
147 147 def branches(self, nodes):
148 148 return self._repo.branches(nodes)
149 149
150 150 def between(self, pairs):
151 151 return self._repo.between(pairs)
152 152
153 153 def changegroup(self, basenodes, source):
154 154 return changegroup.changegroup(self._repo, basenodes, source)
155 155
156 156 def changegroupsubset(self, bases, heads, source):
157 157 return changegroup.changegroupsubset(self._repo, bases, heads, source)
158 158
159 159 class localrepository(object):
160 160
161 161 supportedformats = set(('revlogv1', 'generaldelta'))
162 162 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
163 163 'dotencode'))
164 164 openerreqs = set(('revlogv1', 'generaldelta'))
165 165 requirements = ['revlogv1']
166 166 filtername = None
167 167
168 168 # a list of (ui, featureset) functions.
169 169 # only functions defined in module of enabled extensions are invoked
170 170 featuresetupfuncs = set()
171 171
172 172 def _baserequirements(self, create):
173 173 return self.requirements[:]
174 174
175 175 def __init__(self, baseui, path=None, create=False):
176 176 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
177 177 self.wopener = self.wvfs
178 178 self.root = self.wvfs.base
179 179 self.path = self.wvfs.join(".hg")
180 180 self.origroot = path
181 181 self.auditor = pathutil.pathauditor(self.root, self._checknested)
182 182 self.vfs = scmutil.vfs(self.path)
183 183 self.opener = self.vfs
184 184 self.baseui = baseui
185 185 self.ui = baseui.copy()
186 186 self.ui.copy = baseui.copy # prevent copying repo configuration
187 187 # A list of callback to shape the phase if no data were found.
188 188 # Callback are in the form: func(repo, roots) --> processed root.
189 189 # This list it to be filled by extension during repo setup
190 190 self._phasedefaults = []
191 191 try:
192 192 self.ui.readconfig(self.join("hgrc"), self.root)
193 193 extensions.loadall(self.ui)
194 194 except IOError:
195 195 pass
196 196
197 197 if self.featuresetupfuncs:
198 198 self.supported = set(self._basesupported) # use private copy
199 199 extmods = set(m.__name__ for n, m
200 200 in extensions.extensions(self.ui))
201 201 for setupfunc in self.featuresetupfuncs:
202 202 if setupfunc.__module__ in extmods:
203 203 setupfunc(self.ui, self.supported)
204 204 else:
205 205 self.supported = self._basesupported
206 206
207 207 if not self.vfs.isdir():
208 208 if create:
209 209 if not self.wvfs.exists():
210 210 self.wvfs.makedirs()
211 211 self.vfs.makedir(notindexed=True)
212 212 requirements = self._baserequirements(create)
213 213 if self.ui.configbool('format', 'usestore', True):
214 214 self.vfs.mkdir("store")
215 215 requirements.append("store")
216 216 if self.ui.configbool('format', 'usefncache', True):
217 217 requirements.append("fncache")
218 218 if self.ui.configbool('format', 'dotencode', True):
219 219 requirements.append('dotencode')
220 220 # create an invalid changelog
221 221 self.vfs.append(
222 222 "00changelog.i",
223 223 '\0\0\0\2' # represents revlogv2
224 224 ' dummy changelog to prevent using the old repo layout'
225 225 )
226 226 if self.ui.configbool('format', 'generaldelta', False):
227 227 requirements.append("generaldelta")
228 228 requirements = set(requirements)
229 229 else:
230 230 raise error.RepoError(_("repository %s not found") % path)
231 231 elif create:
232 232 raise error.RepoError(_("repository %s already exists") % path)
233 233 else:
234 234 try:
235 235 requirements = scmutil.readrequires(self.vfs, self.supported)
236 236 except IOError, inst:
237 237 if inst.errno != errno.ENOENT:
238 238 raise
239 239 requirements = set()
240 240
241 241 self.sharedpath = self.path
242 242 try:
243 243 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
244 244 realpath=True)
245 245 s = vfs.base
246 246 if not vfs.exists():
247 247 raise error.RepoError(
248 248 _('.hg/sharedpath points to nonexistent directory %s') % s)
249 249 self.sharedpath = s
250 250 except IOError, inst:
251 251 if inst.errno != errno.ENOENT:
252 252 raise
253 253
254 254 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
255 255 self.spath = self.store.path
256 256 self.svfs = self.store.vfs
257 257 self.sopener = self.svfs
258 258 self.sjoin = self.store.join
259 259 self.vfs.createmode = self.store.createmode
260 260 self._applyrequirements(requirements)
261 261 if create:
262 262 self._writerequirements()
263 263
264 264
265 265 self._branchcaches = {}
266 266 self.filterpats = {}
267 267 self._datafilters = {}
268 268 self._transref = self._lockref = self._wlockref = None
269 269
270 270 # A cache for various files under .hg/ that tracks file changes,
271 271 # (used by the filecache decorator)
272 272 #
273 273 # Maps a property name to its util.filecacheentry
274 274 self._filecache = {}
275 275
276 276 # hold sets of revision to be filtered
277 277 # should be cleared when something might have changed the filter value:
278 278 # - new changesets,
279 279 # - phase change,
280 280 # - new obsolescence marker,
281 281 # - working directory parent change,
282 282 # - bookmark changes
283 283 self.filteredrevcache = {}
284 284
285 285 def close(self):
286 286 pass
287 287
288 288 def _restrictcapabilities(self, caps):
289 289 # bundle2 is not ready for prime time, drop it unless explicitly
290 290 # required by the tests (or some brave tester)
291 291 if not self.ui.configbool('server', 'bundle2', False):
292 292 caps = set(caps)
293 293 caps.discard('bundle2')
294 294 return caps
295 295
296 296 def _applyrequirements(self, requirements):
297 297 self.requirements = requirements
298 298 self.sopener.options = dict((r, 1) for r in requirements
299 299 if r in self.openerreqs)
300 300 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
301 301 if chunkcachesize is not None:
302 302 self.sopener.options['chunkcachesize'] = chunkcachesize
303 303
304 304 def _writerequirements(self):
305 305 reqfile = self.opener("requires", "w")
306 306 for r in sorted(self.requirements):
307 307 reqfile.write("%s\n" % r)
308 308 reqfile.close()
309 309
310 310 def _checknested(self, path):
311 311 """Determine if path is a legal nested repository."""
312 312 if not path.startswith(self.root):
313 313 return False
314 314 subpath = path[len(self.root) + 1:]
315 315 normsubpath = util.pconvert(subpath)
316 316
317 317 # XXX: Checking against the current working copy is wrong in
318 318 # the sense that it can reject things like
319 319 #
320 320 # $ hg cat -r 10 sub/x.txt
321 321 #
322 322 # if sub/ is no longer a subrepository in the working copy
323 323 # parent revision.
324 324 #
325 325 # However, it can of course also allow things that would have
326 326 # been rejected before, such as the above cat command if sub/
327 327 # is a subrepository now, but was a normal directory before.
328 328 # The old path auditor would have rejected by mistake since it
329 329 # panics when it sees sub/.hg/.
330 330 #
331 331 # All in all, checking against the working copy seems sensible
332 332 # since we want to prevent access to nested repositories on
333 333 # the filesystem *now*.
334 334 ctx = self[None]
335 335 parts = util.splitpath(subpath)
336 336 while parts:
337 337 prefix = '/'.join(parts)
338 338 if prefix in ctx.substate:
339 339 if prefix == normsubpath:
340 340 return True
341 341 else:
342 342 sub = ctx.sub(prefix)
343 343 return sub.checknested(subpath[len(prefix) + 1:])
344 344 else:
345 345 parts.pop()
346 346 return False
347 347
348 348 def peer(self):
349 349 return localpeer(self) # not cached to avoid reference cycle
350 350
351 351 def unfiltered(self):
352 352 """Return unfiltered version of the repository
353 353
354 354 Intended to be overwritten by filtered repo."""
355 355 return self
356 356
357 357 def filtered(self, name):
358 358 """Return a filtered version of a repository"""
359 359 # build a new class with the mixin and the current class
360 360 # (possibly subclass of the repo)
361 361 class proxycls(repoview.repoview, self.unfiltered().__class__):
362 362 pass
363 363 return proxycls(self, name)
364 364
365 365 @repofilecache('bookmarks')
366 366 def _bookmarks(self):
367 367 return bookmarks.bmstore(self)
368 368
369 369 @repofilecache('bookmarks.current')
370 370 def _bookmarkcurrent(self):
371 371 return bookmarks.readcurrent(self)
372 372
373 373 def bookmarkheads(self, bookmark):
374 374 name = bookmark.split('@', 1)[0]
375 375 heads = []
376 376 for mark, n in self._bookmarks.iteritems():
377 377 if mark.split('@', 1)[0] == name:
378 378 heads.append(n)
379 379 return heads
380 380
381 381 @storecache('phaseroots')
382 382 def _phasecache(self):
383 383 return phases.phasecache(self, self._phasedefaults)
384 384
385 385 @storecache('obsstore')
386 386 def obsstore(self):
387 387 store = obsolete.obsstore(self.sopener)
388 388 if store and not obsolete._enabled:
389 389 # message is rare enough to not be translated
390 390 msg = 'obsolete feature not enabled but %i markers found!\n'
391 391 self.ui.warn(msg % len(list(store)))
392 392 return store
393 393
394 394 @storecache('00changelog.i')
395 395 def changelog(self):
396 396 c = changelog.changelog(self.sopener)
397 397 if 'HG_PENDING' in os.environ:
398 398 p = os.environ['HG_PENDING']
399 399 if p.startswith(self.root):
400 400 c.readpending('00changelog.i.a')
401 401 return c
402 402
403 403 @storecache('00manifest.i')
404 404 def manifest(self):
405 405 return manifest.manifest(self.sopener)
406 406
407 407 @repofilecache('dirstate')
408 408 def dirstate(self):
409 409 warned = [0]
410 410 def validate(node):
411 411 try:
412 412 self.changelog.rev(node)
413 413 return node
414 414 except error.LookupError:
415 415 if not warned[0]:
416 416 warned[0] = True
417 417 self.ui.warn(_("warning: ignoring unknown"
418 418 " working parent %s!\n") % short(node))
419 419 return nullid
420 420
421 421 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
422 422
423 423 def __getitem__(self, changeid):
424 424 if changeid is None:
425 425 return context.workingctx(self)
426 426 return context.changectx(self, changeid)
427 427
428 428 def __contains__(self, changeid):
429 429 try:
430 430 return bool(self.lookup(changeid))
431 431 except error.RepoLookupError:
432 432 return False
433 433
434 434 def __nonzero__(self):
435 435 return True
436 436
437 437 def __len__(self):
438 438 return len(self.changelog)
439 439
440 440 def __iter__(self):
441 441 return iter(self.changelog)
442 442
443 443 def revs(self, expr, *args):
444 444 '''Return a list of revisions matching the given revset'''
445 445 expr = revset.formatspec(expr, *args)
446 446 m = revset.match(None, expr)
447 447 return m(self, revset.spanset(self))
448 448
449 449 def set(self, expr, *args):
450 450 '''
451 451 Yield a context for each matching revision, after doing arg
452 452 replacement via revset.formatspec
453 453 '''
454 454 for r in self.revs(expr, *args):
455 455 yield self[r]
456 456
457 457 def url(self):
458 458 return 'file:' + self.root
459 459
460 460 def hook(self, name, throw=False, **args):
461 461 return hook.hook(self.ui, self, name, throw, **args)
462 462
463 463 @unfilteredmethod
464 464 def _tag(self, names, node, message, local, user, date, extra={}):
465 465 if isinstance(names, str):
466 466 names = (names,)
467 467
468 468 branches = self.branchmap()
469 469 for name in names:
470 470 self.hook('pretag', throw=True, node=hex(node), tag=name,
471 471 local=local)
472 472 if name in branches:
473 473 self.ui.warn(_("warning: tag %s conflicts with existing"
474 474 " branch name\n") % name)
475 475
476 476 def writetags(fp, names, munge, prevtags):
477 477 fp.seek(0, 2)
478 478 if prevtags and prevtags[-1] != '\n':
479 479 fp.write('\n')
480 480 for name in names:
481 481 m = munge and munge(name) or name
482 482 if (self._tagscache.tagtypes and
483 483 name in self._tagscache.tagtypes):
484 484 old = self.tags().get(name, nullid)
485 485 fp.write('%s %s\n' % (hex(old), m))
486 486 fp.write('%s %s\n' % (hex(node), m))
487 487 fp.close()
488 488
489 489 prevtags = ''
490 490 if local:
491 491 try:
492 492 fp = self.opener('localtags', 'r+')
493 493 except IOError:
494 494 fp = self.opener('localtags', 'a')
495 495 else:
496 496 prevtags = fp.read()
497 497
498 498 # local tags are stored in the current charset
499 499 writetags(fp, names, None, prevtags)
500 500 for name in names:
501 501 self.hook('tag', node=hex(node), tag=name, local=local)
502 502 return
503 503
504 504 try:
505 505 fp = self.wfile('.hgtags', 'rb+')
506 506 except IOError, e:
507 507 if e.errno != errno.ENOENT:
508 508 raise
509 509 fp = self.wfile('.hgtags', 'ab')
510 510 else:
511 511 prevtags = fp.read()
512 512
513 513 # committed tags are stored in UTF-8
514 514 writetags(fp, names, encoding.fromlocal, prevtags)
515 515
516 516 fp.close()
517 517
518 518 self.invalidatecaches()
519 519
520 520 if '.hgtags' not in self.dirstate:
521 521 self[None].add(['.hgtags'])
522 522
523 523 m = matchmod.exact(self.root, '', ['.hgtags'])
524 524 tagnode = self.commit(message, user, date, extra=extra, match=m)
525 525
526 526 for name in names:
527 527 self.hook('tag', node=hex(node), tag=name, local=local)
528 528
529 529 return tagnode
530 530
531 531 def tag(self, names, node, message, local, user, date):
532 532 '''tag a revision with one or more symbolic names.
533 533
534 534 names is a list of strings or, when adding a single tag, names may be a
535 535 string.
536 536
537 537 if local is True, the tags are stored in a per-repository file.
538 538 otherwise, they are stored in the .hgtags file, and a new
539 539 changeset is committed with the change.
540 540
541 541 keyword arguments:
542 542
543 543 local: whether to store tags in non-version-controlled file
544 544 (default False)
545 545
546 546 message: commit message to use if committing
547 547
548 548 user: name of user to use if committing
549 549
550 550 date: date tuple to use if committing'''
551 551
552 552 if not local:
553 553 for x in self.status()[:5]:
554 554 if '.hgtags' in x:
555 555 raise util.Abort(_('working copy of .hgtags is changed '
556 556 '(please commit .hgtags manually)'))
557 557
558 558 self.tags() # instantiate the cache
559 559 self._tag(names, node, message, local, user, date)
560 560
561 561 @filteredpropertycache
562 562 def _tagscache(self):
563 563 '''Returns a tagscache object that contains various tags related
564 564 caches.'''
565 565
566 566 # This simplifies its cache management by having one decorated
567 567 # function (this one) and the rest simply fetch things from it.
568 568 class tagscache(object):
569 569 def __init__(self):
570 570 # These two define the set of tags for this repository. tags
571 571 # maps tag name to node; tagtypes maps tag name to 'global' or
572 572 # 'local'. (Global tags are defined by .hgtags across all
573 573 # heads, and local tags are defined in .hg/localtags.)
574 574 # They constitute the in-memory cache of tags.
575 575 self.tags = self.tagtypes = None
576 576
577 577 self.nodetagscache = self.tagslist = None
578 578
579 579 cache = tagscache()
580 580 cache.tags, cache.tagtypes = self._findtags()
581 581
582 582 return cache
583 583
584 584 def tags(self):
585 585 '''return a mapping of tag to node'''
586 586 t = {}
587 587 if self.changelog.filteredrevs:
588 588 tags, tt = self._findtags()
589 589 else:
590 590 tags = self._tagscache.tags
591 591 for k, v in tags.iteritems():
592 592 try:
593 593 # ignore tags to unknown nodes
594 594 self.changelog.rev(v)
595 595 t[k] = v
596 596 except (error.LookupError, ValueError):
597 597 pass
598 598 return t
599 599
600 600 def _findtags(self):
601 601 '''Do the hard work of finding tags. Return a pair of dicts
602 602 (tags, tagtypes) where tags maps tag name to node, and tagtypes
603 603 maps tag name to a string like \'global\' or \'local\'.
604 604 Subclasses or extensions are free to add their own tags, but
605 605 should be aware that the returned dicts will be retained for the
606 606 duration of the localrepo object.'''
607 607
608 608 # XXX what tagtype should subclasses/extensions use? Currently
609 609 # mq and bookmarks add tags, but do not set the tagtype at all.
610 610 # Should each extension invent its own tag type? Should there
611 611 # be one tagtype for all such "virtual" tags? Or is the status
612 612 # quo fine?
613 613
614 614 alltags = {} # map tag name to (node, hist)
615 615 tagtypes = {}
616 616
617 617 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
618 618 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
619 619
620 620 # Build the return dicts. Have to re-encode tag names because
621 621 # the tags module always uses UTF-8 (in order not to lose info
622 622 # writing to the cache), but the rest of Mercurial wants them in
623 623 # local encoding.
624 624 tags = {}
625 625 for (name, (node, hist)) in alltags.iteritems():
626 626 if node != nullid:
627 627 tags[encoding.tolocal(name)] = node
628 628 tags['tip'] = self.changelog.tip()
629 629 tagtypes = dict([(encoding.tolocal(name), value)
630 630 for (name, value) in tagtypes.iteritems()])
631 631 return (tags, tagtypes)
632 632
633 633 def tagtype(self, tagname):
634 634 '''
635 635 return the type of the given tag. result can be:
636 636
637 637 'local' : a local tag
638 638 'global' : a global tag
639 639 None : tag does not exist
640 640 '''
641 641
642 642 return self._tagscache.tagtypes.get(tagname)
643 643
644 644 def tagslist(self):
645 645 '''return a list of tags ordered by revision'''
646 646 if not self._tagscache.tagslist:
647 647 l = []
648 648 for t, n in self.tags().iteritems():
649 649 r = self.changelog.rev(n)
650 650 l.append((r, t, n))
651 651 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
652 652
653 653 return self._tagscache.tagslist
654 654
655 655 def nodetags(self, node):
656 656 '''return the tags associated with a node'''
657 657 if not self._tagscache.nodetagscache:
658 658 nodetagscache = {}
659 659 for t, n in self._tagscache.tags.iteritems():
660 660 nodetagscache.setdefault(n, []).append(t)
661 661 for tags in nodetagscache.itervalues():
662 662 tags.sort()
663 663 self._tagscache.nodetagscache = nodetagscache
664 664 return self._tagscache.nodetagscache.get(node, [])
665 665
666 666 def nodebookmarks(self, node):
667 667 marks = []
668 668 for bookmark, n in self._bookmarks.iteritems():
669 669 if n == node:
670 670 marks.append(bookmark)
671 671 return sorted(marks)
672 672
673 673 def branchmap(self):
674 674 '''returns a dictionary {branch: [branchheads]} with branchheads
675 675 ordered by increasing revision number'''
676 676 branchmap.updatecache(self)
677 677 return self._branchcaches[self.filtername]
678 678
679 679 def branchtip(self, branch):
680 680 '''return the tip node for a given branch'''
681 681 try:
682 682 return self.branchmap().branchtip(branch)
683 683 except KeyError:
684 684 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
685 685
686 686 def lookup(self, key):
687 687 return self[key].node()
688 688
689 689 def lookupbranch(self, key, remote=None):
690 690 repo = remote or self
691 691 if key in repo.branchmap():
692 692 return key
693 693
694 694 repo = (remote and remote.local()) and remote or self
695 695 return repo[key].branch()
696 696
697 697 def known(self, nodes):
698 698 nm = self.changelog.nodemap
699 699 pc = self._phasecache
700 700 result = []
701 701 for n in nodes:
702 702 r = nm.get(n)
703 703 resp = not (r is None or pc.phase(self, r) >= phases.secret)
704 704 result.append(resp)
705 705 return result
706 706
707 707 def local(self):
708 708 return self
709 709
710 710 def cancopy(self):
711 711 # so statichttprepo's override of local() works
712 712 if not self.local():
713 713 return False
714 714 if not self.ui.configbool('phases', 'publish', True):
715 715 return True
716 716 # if publishing we can't copy if there is filtered content
717 717 return not self.filtered('visible').changelog.filteredrevs
718 718
719 719 def join(self, f):
720 720 return os.path.join(self.path, f)
721 721
722 722 def wjoin(self, f):
723 723 return os.path.join(self.root, f)
724 724
725 725 def file(self, f):
726 726 if f[0] == '/':
727 727 f = f[1:]
728 728 return filelog.filelog(self.sopener, f)
729 729
730 730 def changectx(self, changeid):
731 731 return self[changeid]
732 732
733 733 def parents(self, changeid=None):
734 734 '''get list of changectxs for parents of changeid'''
735 735 return self[changeid].parents()
736 736
737 737 def setparents(self, p1, p2=nullid):
738 738 copies = self.dirstate.setparents(p1, p2)
739 739 pctx = self[p1]
740 740 if copies:
741 741 # Adjust copy records, the dirstate cannot do it, it
742 742 # requires access to parents manifests. Preserve them
743 743 # only for entries added to first parent.
744 744 for f in copies:
745 745 if f not in pctx and copies[f] in pctx:
746 746 self.dirstate.copy(copies[f], f)
747 747 if p2 == nullid:
748 748 for f, s in sorted(self.dirstate.copies().items()):
749 749 if f not in pctx and s not in pctx:
750 750 self.dirstate.copy(None, f)
751 751
752 752 def filectx(self, path, changeid=None, fileid=None):
753 753 """changeid can be a changeset revision, node, or tag.
754 754 fileid can be a file revision or node."""
755 755 return context.filectx(self, path, changeid, fileid)
756 756
757 757 def getcwd(self):
758 758 return self.dirstate.getcwd()
759 759
760 760 def pathto(self, f, cwd=None):
761 761 return self.dirstate.pathto(f, cwd)
762 762
763 763 def wfile(self, f, mode='r'):
764 764 return self.wopener(f, mode)
765 765
766 766 def _link(self, f):
767 767 return self.wvfs.islink(f)
768 768
769 769 def _loadfilter(self, filter):
770 770 if filter not in self.filterpats:
771 771 l = []
772 772 for pat, cmd in self.ui.configitems(filter):
773 773 if cmd == '!':
774 774 continue
775 775 mf = matchmod.match(self.root, '', [pat])
776 776 fn = None
777 777 params = cmd
778 778 for name, filterfn in self._datafilters.iteritems():
779 779 if cmd.startswith(name):
780 780 fn = filterfn
781 781 params = cmd[len(name):].lstrip()
782 782 break
783 783 if not fn:
784 784 fn = lambda s, c, **kwargs: util.filter(s, c)
785 785 # Wrap old filters not supporting keyword arguments
786 786 if not inspect.getargspec(fn)[2]:
787 787 oldfn = fn
788 788 fn = lambda s, c, **kwargs: oldfn(s, c)
789 789 l.append((mf, fn, params))
790 790 self.filterpats[filter] = l
791 791 return self.filterpats[filter]
792 792
793 793 def _filter(self, filterpats, filename, data):
794 794 for mf, fn, cmd in filterpats:
795 795 if mf(filename):
796 796 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
797 797 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
798 798 break
799 799
800 800 return data
801 801
802 802 @unfilteredpropertycache
803 803 def _encodefilterpats(self):
804 804 return self._loadfilter('encode')
805 805
806 806 @unfilteredpropertycache
807 807 def _decodefilterpats(self):
808 808 return self._loadfilter('decode')
809 809
810 810 def adddatafilter(self, name, filter):
811 811 self._datafilters[name] = filter
812 812
813 813 def wread(self, filename):
814 814 if self._link(filename):
815 815 data = self.wvfs.readlink(filename)
816 816 else:
817 817 data = self.wopener.read(filename)
818 818 return self._filter(self._encodefilterpats, filename, data)
819 819
820 820 def wwrite(self, filename, data, flags):
821 821 data = self._filter(self._decodefilterpats, filename, data)
822 822 if 'l' in flags:
823 823 self.wopener.symlink(data, filename)
824 824 else:
825 825 self.wopener.write(filename, data)
826 826 if 'x' in flags:
827 827 self.wvfs.setflags(filename, False, True)
828 828
829 829 def wwritedata(self, filename, data):
830 830 return self._filter(self._decodefilterpats, filename, data)
831 831
832 832 def transaction(self, desc, report=None):
833 833 tr = self._transref and self._transref() or None
834 834 if tr and tr.running():
835 835 return tr.nest()
836 836
837 837 # abort here if the journal already exists
838 838 if self.svfs.exists("journal"):
839 839 raise error.RepoError(
840 840 _("abandoned transaction found - run hg recover"))
841 841
842 842 def onclose():
843 843 self.store.write(tr)
844 844
845 845 self._writejournal(desc)
846 846 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
847 847 rp = report and report or self.ui.warn
848 848 tr = transaction.transaction(rp, self.sopener,
849 849 "journal",
850 850 aftertrans(renames),
851 851 self.store.createmode,
852 852 onclose)
853 853 self._transref = weakref.ref(tr)
854 854 return tr
855 855
856 856 def _journalfiles(self):
857 857 return ((self.svfs, 'journal'),
858 858 (self.vfs, 'journal.dirstate'),
859 859 (self.vfs, 'journal.branch'),
860 860 (self.vfs, 'journal.desc'),
861 861 (self.vfs, 'journal.bookmarks'),
862 862 (self.svfs, 'journal.phaseroots'))
863 863
864 864 def undofiles(self):
865 865 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
866 866
867 867 def _writejournal(self, desc):
868 868 self.opener.write("journal.dirstate",
869 869 self.opener.tryread("dirstate"))
870 870 self.opener.write("journal.branch",
871 871 encoding.fromlocal(self.dirstate.branch()))
872 872 self.opener.write("journal.desc",
873 873 "%d\n%s\n" % (len(self), desc))
874 874 self.opener.write("journal.bookmarks",
875 875 self.opener.tryread("bookmarks"))
876 876 self.sopener.write("journal.phaseroots",
877 877 self.sopener.tryread("phaseroots"))
878 878
879 879 def recover(self):
880 880 lock = self.lock()
881 881 try:
882 882 if self.svfs.exists("journal"):
883 883 self.ui.status(_("rolling back interrupted transaction\n"))
884 884 transaction.rollback(self.sopener, "journal",
885 885 self.ui.warn)
886 886 self.invalidate()
887 887 return True
888 888 else:
889 889 self.ui.warn(_("no interrupted transaction available\n"))
890 890 return False
891 891 finally:
892 892 lock.release()
893 893
894 894 def rollback(self, dryrun=False, force=False):
895 895 wlock = lock = None
896 896 try:
897 897 wlock = self.wlock()
898 898 lock = self.lock()
899 899 if self.svfs.exists("undo"):
900 900 return self._rollback(dryrun, force)
901 901 else:
902 902 self.ui.warn(_("no rollback information available\n"))
903 903 return 1
904 904 finally:
905 905 release(lock, wlock)
906 906
907 907 @unfilteredmethod # Until we get smarter cache management
908 908 def _rollback(self, dryrun, force):
909 909 ui = self.ui
910 910 try:
911 911 args = self.opener.read('undo.desc').splitlines()
912 912 (oldlen, desc, detail) = (int(args[0]), args[1], None)
913 913 if len(args) >= 3:
914 914 detail = args[2]
915 915 oldtip = oldlen - 1
916 916
917 917 if detail and ui.verbose:
918 918 msg = (_('repository tip rolled back to revision %s'
919 919 ' (undo %s: %s)\n')
920 920 % (oldtip, desc, detail))
921 921 else:
922 922 msg = (_('repository tip rolled back to revision %s'
923 923 ' (undo %s)\n')
924 924 % (oldtip, desc))
925 925 except IOError:
926 926 msg = _('rolling back unknown transaction\n')
927 927 desc = None
928 928
929 929 if not force and self['.'] != self['tip'] and desc == 'commit':
930 930 raise util.Abort(
931 931 _('rollback of last commit while not checked out '
932 932 'may lose data'), hint=_('use -f to force'))
933 933
934 934 ui.status(msg)
935 935 if dryrun:
936 936 return 0
937 937
938 938 parents = self.dirstate.parents()
939 939 self.destroying()
940 940 transaction.rollback(self.sopener, 'undo', ui.warn)
941 941 if self.vfs.exists('undo.bookmarks'):
942 942 self.vfs.rename('undo.bookmarks', 'bookmarks')
943 943 if self.svfs.exists('undo.phaseroots'):
944 944 self.svfs.rename('undo.phaseroots', 'phaseroots')
945 945 self.invalidate()
946 946
947 947 parentgone = (parents[0] not in self.changelog.nodemap or
948 948 parents[1] not in self.changelog.nodemap)
949 949 if parentgone:
950 950 self.vfs.rename('undo.dirstate', 'dirstate')
951 951 try:
952 952 branch = self.opener.read('undo.branch')
953 953 self.dirstate.setbranch(encoding.tolocal(branch))
954 954 except IOError:
955 955 ui.warn(_('named branch could not be reset: '
956 956 'current branch is still \'%s\'\n')
957 957 % self.dirstate.branch())
958 958
959 959 self.dirstate.invalidate()
960 960 parents = tuple([p.rev() for p in self.parents()])
961 961 if len(parents) > 1:
962 962 ui.status(_('working directory now based on '
963 963 'revisions %d and %d\n') % parents)
964 964 else:
965 965 ui.status(_('working directory now based on '
966 966 'revision %d\n') % parents)
967 967 # TODO: if we know which new heads may result from this rollback, pass
968 968 # them to destroy(), which will prevent the branchhead cache from being
969 969 # invalidated.
970 970 self.destroyed()
971 971 return 0
972 972
973 973 def invalidatecaches(self):
974 974
975 975 if '_tagscache' in vars(self):
976 976 # can't use delattr on proxy
977 977 del self.__dict__['_tagscache']
978 978
979 979 self.unfiltered()._branchcaches.clear()
980 980 self.invalidatevolatilesets()
981 981
982 982 def invalidatevolatilesets(self):
983 983 self.filteredrevcache.clear()
984 984 obsolete.clearobscaches(self)
985 985
986 986 def invalidatedirstate(self):
987 987 '''Invalidates the dirstate, causing the next call to dirstate
988 988 to check if it was modified since the last time it was read,
989 989 rereading it if it has.
990 990
991 991 This is different from dirstate.invalidate() in that it doesn't
992 992 always reread the dirstate. Use dirstate.invalidate() if you want to
993 993 explicitly read the dirstate again (i.e. to restore it to a previous
994 994 known good state).'''
995 995 if hasunfilteredcache(self, 'dirstate'):
996 996 for k in self.dirstate._filecache:
997 997 try:
998 998 delattr(self.dirstate, k)
999 999 except AttributeError:
1000 1000 pass
1001 1001 delattr(self.unfiltered(), 'dirstate')
1002 1002
1003 1003 def invalidate(self):
1004 1004 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1005 1005 for k in self._filecache:
1006 1006 # dirstate is invalidated separately in invalidatedirstate()
1007 1007 if k == 'dirstate':
1008 1008 continue
1009 1009
1010 1010 try:
1011 1011 delattr(unfiltered, k)
1012 1012 except AttributeError:
1013 1013 pass
1014 1014 self.invalidatecaches()
1015 1015 self.store.invalidatecaches()
1016 1016
1017 1017 def invalidateall(self):
1018 1018 '''Fully invalidates both store and non-store parts, causing the
1019 1019 subsequent operation to reread any outside changes.'''
1020 1020 # extension should hook this to invalidate its caches
1021 1021 self.invalidate()
1022 1022 self.invalidatedirstate()
1023 1023
1024 1024 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1025 1025 try:
1026 1026 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1027 1027 except error.LockHeld, inst:
1028 1028 if not wait:
1029 1029 raise
1030 1030 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1031 1031 (desc, inst.locker))
1032 1032 # default to 600 seconds timeout
1033 1033 l = lockmod.lock(vfs, lockname,
1034 1034 int(self.ui.config("ui", "timeout", "600")),
1035 1035 releasefn, desc=desc)
1036 1036 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1037 1037 if acquirefn:
1038 1038 acquirefn()
1039 1039 return l
1040 1040
1041 1041 def _afterlock(self, callback):
1042 1042 """add a callback to the current repository lock.
1043 1043
1044 1044 The callback will be executed on lock release."""
1045 1045 l = self._lockref and self._lockref()
1046 1046 if l:
1047 1047 l.postrelease.append(callback)
1048 1048 else:
1049 1049 callback()
1050 1050
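# Editor's sketch (hypothetical extension code): deferring work until the store
# lock is released, the same way commit() schedules its "commit" hook below.
def _deferred(repo=repo):
    repo.ui.note('store lock released, running deferred work\n')
repo._afterlock(_deferred)   # runs immediately when no lock is currently held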
1051 1051 def lock(self, wait=True):
1052 1052 '''Lock the repository store (.hg/store) and return a weak reference
1053 1053 to the lock. Use this before modifying the store (e.g. committing or
1054 1054 stripping). If you are opening a transaction, get a lock as well.'''
1055 1055 l = self._lockref and self._lockref()
1056 1056 if l is not None and l.held:
1057 1057 l.lock()
1058 1058 return l
1059 1059
1060 1060 def unlock():
1061 1061 if hasunfilteredcache(self, '_phasecache'):
1062 1062 self._phasecache.write()
1063 1063 for k, ce in self._filecache.items():
1064 1064 if k == 'dirstate' or k not in self.__dict__:
1065 1065 continue
1066 1066 ce.refresh()
1067 1067
1068 1068 l = self._lock(self.svfs, "lock", wait, unlock,
1069 1069 self.invalidate, _('repository %s') % self.origroot)
1070 1070 self._lockref = weakref.ref(l)
1071 1071 return l
1072 1072
1073 1073 def wlock(self, wait=True):
1074 1074 '''Lock the non-store parts of the repository (everything under
1075 1075 .hg except .hg/store) and return a weak reference to the lock.
1076 1076 Use this before modifying files in .hg.'''
1077 1077 l = self._wlockref and self._wlockref()
1078 1078 if l is not None and l.held:
1079 1079 l.lock()
1080 1080 return l
1081 1081
1082 1082 def unlock():
1083 1083 self.dirstate.write()
1084 1084 self._filecache['dirstate'].refresh()
1085 1085
1086 1086 l = self._lock(self.vfs, "wlock", wait, unlock,
1087 1087 self.invalidatedirstate, _('working directory of %s') %
1088 1088 self.origroot)
1089 1089 self._wlockref = weakref.ref(l)
1090 1090 return l
1091 1091
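# Editor's sketch of the usual acquisition order seen in this file (compare
# rollback() above): wlock before lock, release in reverse, and hold the store
# lock before opening a transaction. Assumed caller-side code, not localrepo's.
wlock = lock = None
try:
    wlock = repo.wlock()   # non-store parts: dirstate, bookmarks, ...
    lock = repo.lock()     # the store: changelog, manifest, filelogs
    tr = repo.transaction('example')
    try:
        # ... modify the store ...
        tr.close()
    finally:
        tr.release()
finally:
    release(lock, wlock)   # same release() helper rollback() uses above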
1092 1092 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1093 1093 """
1094 1094 commit an individual file as part of a larger transaction
1095 1095 """
1096 1096
1097 1097 fname = fctx.path()
1098 1098 text = fctx.data()
1099 1099 flog = self.file(fname)
1100 1100 fparent1 = manifest1.get(fname, nullid)
1101 1101 fparent2 = fparent2o = manifest2.get(fname, nullid)
1102 1102
1103 1103 meta = {}
1104 1104 copy = fctx.renamed()
1105 1105 if copy and copy[0] != fname:
1106 1106 # Mark the new revision of this file as a copy of another
1107 1107 # file. This copy data will effectively act as a parent
1108 1108 # of this new revision. If this is a merge, the first
1109 1109 # parent will be the nullid (meaning "look up the copy data")
1110 1110 # and the second one will be the other parent. For example:
1111 1111 #
1112 1112 # 0 --- 1 --- 3 rev1 changes file foo
1113 1113 # \ / rev2 renames foo to bar and changes it
1114 1114 # \- 2 -/ rev3 should have bar with all changes and
1115 1115 # should record that bar descends from
1116 1116 # bar in rev2 and foo in rev1
1117 1117 #
1118 1118 # this allows this merge to succeed:
1119 1119 #
1120 1120 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1121 1121 # \ / merging rev3 and rev4 should use bar@rev2
1122 1122 # \- 2 --- 4 as the merge base
1123 1123 #
1124 1124
1125 1125 cfname = copy[0]
1126 1126 crev = manifest1.get(cfname)
1127 1127 newfparent = fparent2
1128 1128
1129 1129 if manifest2: # branch merge
1130 1130 if fparent2 == nullid or crev is None: # copied on remote side
1131 1131 if cfname in manifest2:
1132 1132 crev = manifest2[cfname]
1133 1133 newfparent = fparent1
1134 1134
1135 1135 # find source in nearest ancestor if we've lost track
1136 1136 if not crev:
1137 1137 self.ui.debug(" %s: searching for copy revision for %s\n" %
1138 1138 (fname, cfname))
1139 1139 for ancestor in self[None].ancestors():
1140 1140 if cfname in ancestor:
1141 1141 crev = ancestor[cfname].filenode()
1142 1142 break
1143 1143
1144 1144 if crev:
1145 1145 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1146 1146 meta["copy"] = cfname
1147 1147 meta["copyrev"] = hex(crev)
1148 1148 fparent1, fparent2 = nullid, newfparent
1149 1149 else:
1150 1150 self.ui.warn(_("warning: can't find ancestor for '%s' "
1151 1151 "copied from '%s'!\n") % (fname, cfname))
1152 1152
1153 1153 elif fparent1 == nullid:
1154 1154 fparent1, fparent2 = fparent2, nullid
1155 1155 elif fparent2 != nullid:
1156 1156 # is one parent an ancestor of the other?
1157 1157 fparentancestors = flog.commonancestors(fparent1, fparent2)
1158 1158 if fparent1 in fparentancestors:
1159 1159 fparent1, fparent2 = fparent2, nullid
1160 1160 elif fparent2 in fparentancestors:
1161 1161 fparent2 = nullid
1162 1162
1163 1163 # is the file changed?
1164 1164 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1165 1165 changelist.append(fname)
1166 1166 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1167 1167
1168 1168 # are just the flags changed during merge?
1169 1169 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1170 1170 changelist.append(fname)
1171 1171
1172 1172 return fparent1
1173 1173
1174 1174 @unfilteredmethod
1175 1175 def commit(self, text="", user=None, date=None, match=None, force=False,
1176 1176 editor=False, extra={}):
1177 1177 """Add a new revision to current repository.
1178 1178
1179 1179 Revision information is gathered from the working directory,
1180 1180 match can be used to filter the committed files. If editor is
1181 1181 supplied, it is called to get a commit message.
1182 1182 """
1183 1183
1184 1184 def fail(f, msg):
1185 1185 raise util.Abort('%s: %s' % (f, msg))
1186 1186
1187 1187 if not match:
1188 1188 match = matchmod.always(self.root, '')
1189 1189
1190 1190 if not force:
1191 1191 vdirs = []
1192 1192 match.explicitdir = vdirs.append
1193 1193 match.bad = fail
1194 1194
1195 1195 wlock = self.wlock()
1196 1196 try:
1197 1197 wctx = self[None]
1198 1198 merge = len(wctx.parents()) > 1
1199 1199
1200 1200 if (not force and merge and match and
1201 1201 (match.files() or match.anypats())):
1202 1202 raise util.Abort(_('cannot partially commit a merge '
1203 1203 '(do not specify files or patterns)'))
1204 1204
1205 1205 changes = self.status(match=match, clean=force)
1206 1206 if force:
1207 1207 changes[0].extend(changes[6]) # mq may commit unchanged files
1208 1208
1209 1209 # check subrepos
1210 1210 subs = []
1211 1211 commitsubs = set()
1212 1212 newstate = wctx.substate.copy()
1213 1213 # only manage subrepos and .hgsubstate if .hgsub is present
1214 1214 if '.hgsub' in wctx:
1215 1215 # we'll decide whether to track this ourselves, thanks
1216 1216 for c in changes[:3]:
1217 1217 if '.hgsubstate' in c:
1218 1218 c.remove('.hgsubstate')
1219 1219
1220 1220 # compare current state to last committed state
1221 1221 # build new substate based on last committed state
1222 1222 oldstate = wctx.p1().substate
1223 1223 for s in sorted(newstate.keys()):
1224 1224 if not match(s):
1225 1225 # ignore working copy, use old state if present
1226 1226 if s in oldstate:
1227 1227 newstate[s] = oldstate[s]
1228 1228 continue
1229 1229 if not force:
1230 1230 raise util.Abort(
1231 1231 _("commit with new subrepo %s excluded") % s)
1232 1232 if wctx.sub(s).dirty(True):
1233 1233 if not self.ui.configbool('ui', 'commitsubrepos'):
1234 1234 raise util.Abort(
1235 1235 _("uncommitted changes in subrepo %s") % s,
1236 1236 hint=_("use --subrepos for recursive commit"))
1237 1237 subs.append(s)
1238 1238 commitsubs.add(s)
1239 1239 else:
1240 1240 bs = wctx.sub(s).basestate()
1241 1241 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1242 1242 if oldstate.get(s, (None, None, None))[1] != bs:
1243 1243 subs.append(s)
1244 1244
1245 1245 # check for removed subrepos
1246 1246 for p in wctx.parents():
1247 1247 r = [s for s in p.substate if s not in newstate]
1248 1248 subs += [s for s in r if match(s)]
1249 1249 if subs:
1250 1250 if (not match('.hgsub') and
1251 1251 '.hgsub' in (wctx.modified() + wctx.added())):
1252 1252 raise util.Abort(
1253 1253 _("can't commit subrepos without .hgsub"))
1254 1254 changes[0].insert(0, '.hgsubstate')
1255 1255
1256 1256 elif '.hgsub' in changes[2]:
1257 1257 # clean up .hgsubstate when .hgsub is removed
1258 1258 if ('.hgsubstate' in wctx and
1259 1259 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1260 1260 changes[2].insert(0, '.hgsubstate')
1261 1261
1262 1262 # make sure all explicit patterns are matched
1263 1263 if not force and match.files():
1264 1264 matched = set(changes[0] + changes[1] + changes[2])
1265 1265
1266 1266 for f in match.files():
1267 1267 f = self.dirstate.normalize(f)
1268 1268 if f == '.' or f in matched or f in wctx.substate:
1269 1269 continue
1270 1270 if f in changes[3]: # missing
1271 1271 fail(f, _('file not found!'))
1272 1272 if f in vdirs: # visited directory
1273 1273 d = f + '/'
1274 1274 for mf in matched:
1275 1275 if mf.startswith(d):
1276 1276 break
1277 1277 else:
1278 1278 fail(f, _("no match under directory!"))
1279 1279 elif f not in self.dirstate:
1280 1280 fail(f, _("file not tracked!"))
1281 1281
1282 1282 cctx = context.workingctx(self, text, user, date, extra, changes)
1283 1283
1284 1284 if (not force and not extra.get("close") and not merge
1285 1285 and not cctx.files()
1286 1286 and wctx.branch() == wctx.p1().branch()):
1287 1287 return None
1288 1288
1289 1289 if merge and cctx.deleted():
1290 1290 raise util.Abort(_("cannot commit merge with missing files"))
1291 1291
1292 1292 ms = mergemod.mergestate(self)
1293 1293 for f in changes[0]:
1294 1294 if f in ms and ms[f] == 'u':
1295 1295 raise util.Abort(_("unresolved merge conflicts "
1296 1296 "(see hg help resolve)"))
1297 1297
1298 1298 if editor:
1299 1299 cctx._text = editor(self, cctx, subs)
1300 1300 edited = (text != cctx._text)
1301 1301
1302 1302 # Save commit message in case this transaction gets rolled back
1303 1303 # (e.g. by a pretxncommit hook). Leave the content alone on
1304 1304 # the assumption that the user will use the same editor again.
1305 1305 msgfn = self.savecommitmessage(cctx._text)
1306 1306
1307 1307 # commit subs and write new state
1308 1308 if subs:
1309 1309 for s in sorted(commitsubs):
1310 1310 sub = wctx.sub(s)
1311 1311 self.ui.status(_('committing subrepository %s\n') %
1312 1312 subrepo.subrelpath(sub))
1313 1313 sr = sub.commit(cctx._text, user, date)
1314 1314 newstate[s] = (newstate[s][0], sr)
1315 1315 subrepo.writestate(self, newstate)
1316 1316
1317 1317 p1, p2 = self.dirstate.parents()
1318 1318 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1319 1319 try:
1320 1320 self.hook("precommit", throw=True, parent1=hookp1,
1321 1321 parent2=hookp2)
1322 1322 ret = self.commitctx(cctx, True)
1323 1323 except: # re-raises
1324 1324 if edited:
1325 1325 self.ui.write(
1326 1326 _('note: commit message saved in %s\n') % msgfn)
1327 1327 raise
1328 1328
1329 1329 # update bookmarks, dirstate and mergestate
1330 1330 bookmarks.update(self, [p1, p2], ret)
1331 1331 cctx.markcommitted(ret)
1332 1332 ms.reset()
1333 1333 finally:
1334 1334 wlock.release()
1335 1335
1336 1336 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1337 1337 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1338 1338 self._afterlock(commithook)
1339 1339 return ret
1340 1340
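# Editor's sketch of a commit() call (assumed caller-side code; the file name
# and user are made up). matchmod.match() is the matcher factory this module
# already uses; commit() returns None when there is nothing to commit.
m = matchmod.match(repo.root, '', ['README'])
node = repo.commit(text="example commit", user="Example <user@example.com>", match=m)
if node is None:
    repo.ui.status('nothing changed\n')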
1341 1341 @unfilteredmethod
1342 1342 def commitctx(self, ctx, error=False):
1343 1343 """Add a new revision to current repository.
1344 1344 Revision information is passed via the context argument.
1345 1345 """
1346 1346
1347 1347 tr = lock = None
1348 1348 removed = list(ctx.removed())
1349 1349 p1, p2 = ctx.p1(), ctx.p2()
1350 1350 user = ctx.user()
1351 1351
1352 1352 lock = self.lock()
1353 1353 try:
1354 1354 tr = self.transaction("commit")
1355 1355 trp = weakref.proxy(tr)
1356 1356
1357 1357 if ctx.files():
1358 1358 m1 = p1.manifest().copy()
1359 1359 m2 = p2.manifest()
1360 1360
1361 1361 # check in files
1362 1362 new = {}
1363 1363 changed = []
1364 1364 linkrev = len(self)
1365 1365 for f in sorted(ctx.modified() + ctx.added()):
1366 1366 self.ui.note(f + "\n")
1367 1367 try:
1368 1368 fctx = ctx[f]
1369 1369 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1370 1370 changed)
1371 1371 m1.set(f, fctx.flags())
1372 1372 except OSError, inst:
1373 1373 self.ui.warn(_("trouble committing %s!\n") % f)
1374 1374 raise
1375 1375 except IOError, inst:
1376 1376 errcode = getattr(inst, 'errno', errno.ENOENT)
1377 1377 if error or errcode and errcode != errno.ENOENT:
1378 1378 self.ui.warn(_("trouble committing %s!\n") % f)
1379 1379 raise
1380 1380 else:
1381 1381 removed.append(f)
1382 1382
1383 1383 # update manifest
1384 1384 m1.update(new)
1385 1385 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1386 1386 drop = [f for f in removed if f in m1]
1387 1387 for f in drop:
1388 1388 del m1[f]
1389 1389 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1390 1390 p2.manifestnode(), (new, drop))
1391 1391 files = changed + removed
1392 1392 else:
1393 1393 mn = p1.manifestnode()
1394 1394 files = []
1395 1395
1396 1396 # update changelog
1397 1397 self.changelog.delayupdate()
1398 1398 n = self.changelog.add(mn, files, ctx.description(),
1399 1399 trp, p1.node(), p2.node(),
1400 1400 user, ctx.date(), ctx.extra().copy())
1401 1401 p = lambda: self.changelog.writepending() and self.root or ""
1402 1402 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1403 1403 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1404 1404 parent2=xp2, pending=p)
1405 1405 self.changelog.finalize(trp)
1406 1406 # set the new commit in its proper phase
1407 1407 targetphase = subrepo.newcommitphase(self.ui, ctx)
1408 1408 if targetphase:
1409 1409 # retracting the boundary does not alter parent changesets.
1410 1410 # if a parent has a higher phase, the resulting phase will
1411 1411 # be compliant anyway
1412 1412 #
1413 1413 # if minimal phase was 0 we don't need to retract anything
1414 1414 phases.retractboundary(self, targetphase, [n])
1415 1415 tr.close()
1416 1416 branchmap.updatecache(self.filtered('served'))
1417 1417 return n
1418 1418 finally:
1419 1419 if tr:
1420 1420 tr.release()
1421 1421 lock.release()
1422 1422
1423 1423 @unfilteredmethod
1424 1424 def destroying(self):
1425 1425 '''Inform the repository that nodes are about to be destroyed.
1426 1426 Intended for use by strip and rollback, so there's a common
1427 1427 place for anything that has to be done before destroying history.
1428 1428
1429 1429 This is mostly useful for saving state that is in memory and waiting
1430 1430 to be flushed when the current lock is released. Because a call to
1431 1431 destroyed is imminent, the repo will be invalidated, causing those
1432 1432 changes either to stay in memory (waiting for the next unlock) or to
1433 1433 vanish completely.
1434 1434 '''
1435 1435 # When using the same lock to commit and strip, the phasecache is left
1436 1436 # dirty after committing. Then when we strip, the repo is invalidated,
1437 1437 # causing those changes to disappear.
1438 1438 if '_phasecache' in vars(self):
1439 1439 self._phasecache.write()
1440 1440
1441 1441 @unfilteredmethod
1442 1442 def destroyed(self):
1443 1443 '''Inform the repository that nodes have been destroyed.
1444 1444 Intended for use by strip and rollback, so there's a common
1445 1445 place for anything that has to be done after destroying history.
1446 1446 '''
1447 1447 # When one tries to:
1448 1448 # 1) destroy nodes thus calling this method (e.g. strip)
1449 1449 # 2) use phasecache somewhere (e.g. commit)
1450 1450 #
1451 1451 # then 2) will fail because the phasecache contains nodes that were
1452 1452 # removed. We can either remove phasecache from the filecache,
1453 1453 # causing it to reload next time it is accessed, or simply filter
1454 1454 # the removed nodes now and write the updated cache.
1455 1455 self._phasecache.filterunknown(self)
1456 1456 self._phasecache.write()
1457 1457
1458 1458 # update the 'served' branch cache to help read-only server processes
1459 1459 # Thanks to branchcache collaboration this is done from the nearest
1460 1460 # filtered subset and it is expected to be fast.
1461 1461 branchmap.updatecache(self.filtered('served'))
1462 1462
1463 1463 # Ensure the persistent tag cache is updated. Doing it now
1464 1464 # means that the tag cache only has to worry about destroyed
1465 1465 # heads immediately after a strip/rollback. That in turn
1466 1466 # guarantees that "cachetip == currenttip" (comparing both rev
1467 1467 # and node) always means no nodes have been added or destroyed.
1468 1468
1469 1469 # XXX this is suboptimal when qrefresh'ing: we strip the current
1470 1470 # head, refresh the tag cache, then immediately add a new head.
1471 1471 # But I think doing it this way is necessary for the "instant
1472 1472 # tag cache retrieval" case to work.
1473 1473 self.invalidate()
1474 1474
1475 1475 def walk(self, match, node=None):
1476 1476 '''
1477 1477 walk recursively through the directory tree or a given
1478 1478 changeset, finding all files matched by the match
1479 1479 function
1480 1480 '''
1481 1481 return self[node].walk(match)
1482 1482
1483 1483 def status(self, node1='.', node2=None, match=None,
1484 1484 ignored=False, clean=False, unknown=False,
1485 1485 listsubrepos=False):
1486 1486 """return status of files between two nodes or node and working
1487 1487 directory.
1488 1488
1489 1489 If node1 is None, use the first dirstate parent instead.
1490 1490 If node2 is None, compare node1 with working directory.
1491 1491 """
1492 1492
1493 1493 def mfmatches(ctx):
1494 1494 mf = ctx.manifest().copy()
1495 1495 if match.always():
1496 1496 return mf
1497 1497 for fn in mf.keys():
1498 1498 if not match(fn):
1499 1499 del mf[fn]
1500 1500 return mf
1501 1501
1502 1502 ctx1 = self[node1]
1503 1503 ctx2 = self[node2]
1504 1504
1505 1505 working = ctx2.rev() is None
1506 1506 parentworking = working and ctx1 == self['.']
1507 1507 match = match or matchmod.always(self.root, self.getcwd())
1508 1508 listignored, listclean, listunknown = ignored, clean, unknown
1509 1509
1510 1510 # load earliest manifest first for caching reasons
1511 1511 if not working and ctx2.rev() < ctx1.rev():
1512 1512 ctx2.manifest()
1513 1513
1514 1514 if not parentworking:
1515 1515 def bad(f, msg):
1516 1516 # 'f' may be a directory pattern from 'match.files()',
1517 1517 # so 'f not in ctx1' is not enough
1518 1518 if f not in ctx1 and f not in ctx1.dirs():
1519 1519 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1520 1520 match.bad = bad
1521 1521
1522 1522 if working: # we need to scan the working dir
1523 1523 subrepos = []
1524 1524 if '.hgsub' in self.dirstate:
1525 1525 subrepos = sorted(ctx2.substate)
1526 1526 s = self.dirstate.status(match, subrepos, listignored,
1527 1527 listclean, listunknown)
1528 1528 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1529 1529
1530 1530 # check for any possibly clean files
1531 1531 if parentworking and cmp:
1532 1532 fixup = []
1533 1533 # do a full compare of any files that might have changed
1534 1534 for f in sorted(cmp):
1535 1535 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1536 1536 or ctx1[f].cmp(ctx2[f])):
1537 1537 modified.append(f)
1538 1538 else:
1539 1539 fixup.append(f)
1540 1540
1541 1541 # update dirstate for files that are actually clean
1542 1542 if fixup:
1543 1543 if listclean:
1544 1544 clean += fixup
1545 1545
1546 1546 try:
1547 1547 # updating the dirstate is optional
1548 1548 # so we don't wait on the lock
1549 1549 wlock = self.wlock(False)
1550 1550 try:
1551 1551 for f in fixup:
1552 1552 self.dirstate.normal(f)
1553 1553 finally:
1554 1554 wlock.release()
1555 1555 except error.LockError:
1556 1556 pass
1557 1557
1558 1558 if not parentworking:
1559 1559 mf1 = mfmatches(ctx1)
1560 1560 if working:
1561 1561 # we are comparing working dir against non-parent
1562 1562 # generate a pseudo-manifest for the working dir
1563 1563 mf2 = mfmatches(self['.'])
1564 1564 for f in cmp + modified + added:
1565 1565 mf2[f] = None
1566 1566 mf2.set(f, ctx2.flags(f))
1567 1567 for f in removed:
1568 1568 if f in mf2:
1569 1569 del mf2[f]
1570 1570 else:
1571 1571 # we are comparing two revisions
1572 1572 deleted, unknown, ignored = [], [], []
1573 1573 mf2 = mfmatches(ctx2)
1574 1574
1575 1575 modified, added, clean = [], [], []
1576 1576 withflags = mf1.withflags() | mf2.withflags()
1577 1577 for fn, mf2node in mf2.iteritems():
1578 1578 if fn in mf1:
1579 1579 if (fn not in deleted and
1580 1580 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1581 1581 (mf1[fn] != mf2node and
1582 1582 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1583 1583 modified.append(fn)
1584 1584 elif listclean:
1585 1585 clean.append(fn)
1586 1586 del mf1[fn]
1587 1587 elif fn not in deleted:
1588 1588 added.append(fn)
1589 1589 removed = mf1.keys()
1590 1590
1591 1591 if working and modified and not self.dirstate._checklink:
1592 1592 # Symlink placeholders may get non-symlink-like contents
1593 1593 # via user error or dereferencing by NFS or Samba servers,
1594 1594 # so we filter out any placeholders that don't look like a
1595 1595 # symlink
1596 1596 sane = []
1597 1597 for f in modified:
1598 1598 if ctx2.flags(f) == 'l':
1599 1599 d = ctx2[f].data()
1600 1600 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1601 1601 self.ui.debug('ignoring suspect symlink placeholder'
1602 1602 ' "%s"\n' % f)
1603 1603 continue
1604 1604 sane.append(f)
1605 1605 modified = sane
1606 1606
1607 1607 r = modified, added, removed, deleted, unknown, ignored, clean
1608 1608
1609 1609 if listsubrepos:
1610 1610 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1611 1611 if working:
1612 1612 rev2 = None
1613 1613 else:
1614 1614 rev2 = ctx2.substate[subpath][1]
1615 1615 try:
1616 1616 submatch = matchmod.narrowmatcher(subpath, match)
1617 1617 s = sub.status(rev2, match=submatch, ignored=listignored,
1618 1618 clean=listclean, unknown=listunknown,
1619 1619 listsubrepos=True)
1620 1620 for rfiles, sfiles in zip(r, s):
1621 1621 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1622 1622 except error.LookupError:
1623 1623 self.ui.status(_("skipping missing subrepository: %s\n")
1624 1624 % subpath)
1625 1625
1626 1626 for l in r:
1627 1627 l.sort()
1628 1628 return r
1629 1629
1630 1630 def heads(self, start=None):
1631 1631 heads = self.changelog.heads(start)
1632 1632 # sort the output in rev descending order
1633 1633 return sorted(heads, key=self.changelog.rev, reverse=True)
1634 1634
1635 1635 def branchheads(self, branch=None, start=None, closed=False):
1636 1636 '''return a (possibly filtered) list of heads for the given branch
1637 1637
1638 1638 Heads are returned in topological order, from newest to oldest.
1639 1639 If branch is None, use the dirstate branch.
1640 1640 If start is not None, return only heads reachable from start.
1641 1641 If closed is True, return heads that are marked as closed as well.
1642 1642 '''
1643 1643 if branch is None:
1644 1644 branch = self[None].branch()
1645 1645 branches = self.branchmap()
1646 1646 if branch not in branches:
1647 1647 return []
1648 1648 # the cache returns heads ordered lowest to highest
1649 1649 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1650 1650 if start is not None:
1651 1651 # filter out the heads that cannot be reached from startrev
1652 1652 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1653 1653 bheads = [h for h in bheads if h in fbheads]
1654 1654 return bheads
1655 1655
1656 1656 def branches(self, nodes):
1657 1657 if not nodes:
1658 1658 nodes = [self.changelog.tip()]
1659 1659 b = []
1660 1660 for n in nodes:
1661 1661 t = n
1662 1662 while True:
1663 1663 p = self.changelog.parents(n)
1664 1664 if p[1] != nullid or p[0] == nullid:
1665 1665 b.append((t, n, p[0], p[1]))
1666 1666 break
1667 1667 n = p[0]
1668 1668 return b
1669 1669
1670 1670 def between(self, pairs):
1671 1671 r = []
1672 1672
1673 1673 for top, bottom in pairs:
1674 1674 n, l, i = top, [], 0
1675 1675 f = 1
1676 1676
1677 1677 while n != bottom and n != nullid:
1678 1678 p = self.changelog.parents(n)[0]
1679 1679 if i == f:
1680 1680 l.append(n)
1681 1681 f = f * 2
1682 1682 n = p
1683 1683 i += 1
1684 1684
1685 1685 r.append(l)
1686 1686
1687 1687 return r
1688 1688
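# Editor's note: between() samples ancestors of 'top' at power-of-two
# distances. Illustration (hypothetical linear history c0 <- c1 <- ... <- c9):
# between([(c9, c0)]) returns [[c8, c7, c5, c1]], i.e. the ancestors 1, 2, 4
# and 8 steps below c9, stopping once 'bottom' (or nullid) is reached.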
1689 1689 def pull(self, remote, heads=None, force=False):
1690 1690 return exchange.pull(self, remote, heads, force)
1691 1691
1692 1692 def checkpush(self, pushop):
1693 1693 """Extensions can override this function if additional checks have
1694 1694 to be performed before pushing, or call it if they override the push
1695 1695 command.
1696 1696 """
1697 1697 pass
1698 1698
1699 @unfilteredpropertycache
1700 def prepushoutgoinghooks(self):
1701 """Return util.hooks consists of "(repo, remote, outgoing)"
1702 functions, which are called before pushing changesets.
1703 """
1704 return util.hooks()
1705
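# Editor's sketch (hypothetical extension): registering an outgoing check via
# the prepushoutgoinghooks property introduced here. The hook receives the
# (repo, remote, outgoing) arguments that exchange.py passes before pushing;
# the policy itself ("forbidden" in the description) is made up.
from mercurial import util
from mercurial.i18n import _

def _checkoutgoing(repo, remote, outgoing):
    for node in outgoing.missing:           # nodes about to be pushed
        if 'forbidden' in repo[node].description():
            raise util.Abort(_('refusing to push forbidden changeset'))

def reposetup(ui, repo):
    if repo.local():
        repo.prepushoutgoinghooks.add('mycheck', _checkoutgoing)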
1699 1706 def push(self, remote, force=False, revs=None, newbranch=False):
1700 1707 return exchange.push(self, remote, force, revs, newbranch)
1701 1708
1702 1709 def stream_in(self, remote, requirements):
1703 1710 lock = self.lock()
1704 1711 try:
1705 1712 # Save remote branchmap. We will use it later
1706 1713 # to speed up branchcache creation
1707 1714 rbranchmap = None
1708 1715 if remote.capable("branchmap"):
1709 1716 rbranchmap = remote.branchmap()
1710 1717
1711 1718 fp = remote.stream_out()
1712 1719 l = fp.readline()
1713 1720 try:
1714 1721 resp = int(l)
1715 1722 except ValueError:
1716 1723 raise error.ResponseError(
1717 1724 _('unexpected response from remote server:'), l)
1718 1725 if resp == 1:
1719 1726 raise util.Abort(_('operation forbidden by server'))
1720 1727 elif resp == 2:
1721 1728 raise util.Abort(_('locking the remote repository failed'))
1722 1729 elif resp != 0:
1723 1730 raise util.Abort(_('the server sent an unknown error code'))
1724 1731 self.ui.status(_('streaming all changes\n'))
1725 1732 l = fp.readline()
1726 1733 try:
1727 1734 total_files, total_bytes = map(int, l.split(' ', 1))
1728 1735 except (ValueError, TypeError):
1729 1736 raise error.ResponseError(
1730 1737 _('unexpected response from remote server:'), l)
1731 1738 self.ui.status(_('%d files to transfer, %s of data\n') %
1732 1739 (total_files, util.bytecount(total_bytes)))
1733 1740 handled_bytes = 0
1734 1741 self.ui.progress(_('clone'), 0, total=total_bytes)
1735 1742 start = time.time()
1736 1743
1737 1744 tr = self.transaction(_('clone'))
1738 1745 try:
1739 1746 for i in xrange(total_files):
1740 1747 # XXX doesn't support '\n' or '\r' in filenames
1741 1748 l = fp.readline()
1742 1749 try:
1743 1750 name, size = l.split('\0', 1)
1744 1751 size = int(size)
1745 1752 except (ValueError, TypeError):
1746 1753 raise error.ResponseError(
1747 1754 _('unexpected response from remote server:'), l)
1748 1755 if self.ui.debugflag:
1749 1756 self.ui.debug('adding %s (%s)\n' %
1750 1757 (name, util.bytecount(size)))
1751 1758 # for backwards compat, name was partially encoded
1752 1759 ofp = self.sopener(store.decodedir(name), 'w')
1753 1760 for chunk in util.filechunkiter(fp, limit=size):
1754 1761 handled_bytes += len(chunk)
1755 1762 self.ui.progress(_('clone'), handled_bytes,
1756 1763 total=total_bytes)
1757 1764 ofp.write(chunk)
1758 1765 ofp.close()
1759 1766 tr.close()
1760 1767 finally:
1761 1768 tr.release()
1762 1769
1763 1770 # Writing straight to files circumvented the in-memory caches
1764 1771 self.invalidate()
1765 1772
1766 1773 elapsed = time.time() - start
1767 1774 if elapsed <= 0:
1768 1775 elapsed = 0.001
1769 1776 self.ui.progress(_('clone'), None)
1770 1777 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1771 1778 (util.bytecount(total_bytes), elapsed,
1772 1779 util.bytecount(total_bytes / elapsed)))
1773 1780
1774 1781 # new requirements = old non-format requirements +
1775 1782 # new format-related
1776 1783 # requirements from the streamed-in repository
1777 1784 requirements.update(set(self.requirements) - self.supportedformats)
1778 1785 self._applyrequirements(requirements)
1779 1786 self._writerequirements()
1780 1787
1781 1788 if rbranchmap:
1782 1789 rbheads = []
1783 1790 for bheads in rbranchmap.itervalues():
1784 1791 rbheads.extend(bheads)
1785 1792
1786 1793 if rbheads:
1787 1794 rtiprev = max((int(self.changelog.rev(node))
1788 1795 for node in rbheads))
1789 1796 cache = branchmap.branchcache(rbranchmap,
1790 1797 self[rtiprev].node(),
1791 1798 rtiprev)
1792 1799 # Try to stick it as low as possible
1793 1800 # filters above 'served' are unlikely to be fetched from a clone
1794 1801 for candidate in ('base', 'immutable', 'served'):
1795 1802 rview = self.filtered(candidate)
1796 1803 if cache.validfor(rview):
1797 1804 self._branchcaches[candidate] = cache
1798 1805 cache.write(rview)
1799 1806 break
1800 1807 self.invalidate()
1801 1808 return len(self.heads()) + 1
1802 1809 finally:
1803 1810 lock.release()
1804 1811
1805 1812 def clone(self, remote, heads=[], stream=False):
1806 1813 '''clone remote repository.
1807 1814
1808 1815 keyword arguments:
1809 1816 heads: list of revs to clone (forces use of pull)
1810 1817 stream: use streaming clone if possible'''
1811 1818
1812 1819 # now, all clients that can request uncompressed clones can
1813 1820 # read repo formats supported by all servers that can serve
1814 1821 # them.
1815 1822
1816 1823 # if revlog format changes, client will have to check version
1817 1824 # and format flags on "stream" capability, and use
1818 1825 # uncompressed only if compatible.
1819 1826
1820 1827 if not stream:
1821 1828 # if the server explicitly prefers to stream (for fast LANs)
1822 1829 stream = remote.capable('stream-preferred')
1823 1830
1824 1831 if stream and not heads:
1825 1832 # 'stream' means remote revlog format is revlogv1 only
1826 1833 if remote.capable('stream'):
1827 1834 return self.stream_in(remote, set(('revlogv1',)))
1828 1835 # otherwise, 'streamreqs' contains the remote revlog format
1829 1836 streamreqs = remote.capable('streamreqs')
1830 1837 if streamreqs:
1831 1838 streamreqs = set(streamreqs.split(','))
1832 1839 # if we support it, stream in and adjust our requirements
1833 1840 if not streamreqs - self.supportedformats:
1834 1841 return self.stream_in(remote, streamreqs)
1835 1842 return self.pull(remote, heads)
1836 1843
1837 1844 def pushkey(self, namespace, key, old, new):
1838 1845 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1839 1846 old=old, new=new)
1840 1847 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1841 1848 ret = pushkey.push(self, namespace, key, old, new)
1842 1849 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1843 1850 ret=ret)
1844 1851 return ret
1845 1852
1846 1853 def listkeys(self, namespace):
1847 1854 self.hook('prelistkeys', throw=True, namespace=namespace)
1848 1855 self.ui.debug('listing keys for "%s"\n' % namespace)
1849 1856 values = pushkey.list(self, namespace)
1850 1857 self.hook('listkeys', namespace=namespace, values=values)
1851 1858 return values
1852 1859
1853 1860 def debugwireargs(self, one, two, three=None, four=None, five=None):
1854 1861 '''used to test argument passing over the wire'''
1855 1862 return "%s %s %s %s %s" % (one, two, three, four, five)
1856 1863
1857 1864 def savecommitmessage(self, text):
1858 1865 fp = self.opener('last-message.txt', 'wb')
1859 1866 try:
1860 1867 fp.write(text)
1861 1868 finally:
1862 1869 fp.close()
1863 1870 return self.pathto(fp.name[len(self.root) + 1:])
1864 1871
1865 1872 # used to avoid circular references so destructors work
1866 1873 def aftertrans(files):
1867 1874 renamefiles = [tuple(t) for t in files]
1868 1875 def a():
1869 1876 for vfs, src, dest in renamefiles:
1870 1877 try:
1871 1878 vfs.rename(src, dest)
1872 1879 except OSError: # journal file does not yet exist
1873 1880 pass
1874 1881 return a
1875 1882
1876 1883 def undoname(fn):
1877 1884 base, name = os.path.split(fn)
1878 1885 assert name.startswith('journal')
1879 1886 return os.path.join(base, name.replace('journal', 'undo', 1))
1880 1887
1881 1888 def instance(ui, path, create):
1882 1889 return localrepository(ui, util.urllocalpath(path), create)
1883 1890
1884 1891 def islocal(path):
1885 1892 return True