bundle2: add an exchange.getbundle function...
Pierre-Yves David
r20954:dba91f80 default
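The commit introduces exchange.getbundle(repo, source, heads=None, common=None, bundlecaps=None), added at the end of exchange.py in the first hunk below. A minimal usage sketch, assuming a Mercurial 2.9-era Python API and a local repository at '.'; the source tag 'pull' and the capability set are illustrative, and only the presence of 'HG20' in bundlecaps switches the result from a plain HG10 changegroup to a bundle2 stream:

from mercurial import hg, ui as uimod, exchange

myui = uimod.ui()
repo = hg.repository(myui, '.')
# default call: behaves like the old changegroup.getbundle and returns an HG10 changegroup
cg = exchange.getbundle(repo, 'pull', heads=repo.heads())
# advertising the HG20 capability requests the (still experimental) bundle2 form instead
b2 = exchange.getbundle(repo, 'pull', heads=repo.heads(),
                        bundlecaps=set(['HG20']))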
@@ -1,554 +1,585 b''
1 1 # exchange.py - utility to exchange data between repositories.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno
11 11 import util, scmutil, changegroup, base85
12 12 import discovery, phases, obsolete, bookmarks
13 13
14 14
15 15 class pushoperation(object):
16 16 """A object that represent a single push operation
17 17
18 18 It purpose is to carry push related state and very common operation.
19 19
20 20 A new should be created at the begining of each push and discarded
21 21 afterward.
22 22 """
23 23
24 24 def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
25 25 # repo we push from
26 26 self.repo = repo
27 27 self.ui = repo.ui
28 28 # repo we push to
29 29 self.remote = remote
30 30 # force option provided
31 31 self.force = force
32 32 # revs to be pushed (None is "all")
33 33 self.revs = revs
34 34 # allow push of new branch
35 35 self.newbranch = newbranch
36 36 # did a local lock get acquired?
37 37 self.locallocked = None
38 38 # Integer version of the push result
39 39 # - None means nothing to push
40 40 # - 0 means HTTP error
41 41 # - 1 means we pushed and remote head count is unchanged *or*
42 42 # we have outgoing changesets but refused to push
43 43 # - other values as described by addchangegroup()
44 44 self.ret = None
45 45 # discovery.outgoing object (contains common and outgoing data)
46 46 self.outgoing = None
47 47 # all remote heads before the push
48 48 self.remoteheads = None
49 49 # testable as a boolean indicating if any nodes are missing locally.
50 50 self.incoming = None
51 51 # set of all heads common after changeset bundle push
52 52 self.commonheads = None
53 53
54 54 def push(repo, remote, force=False, revs=None, newbranch=False):
55 55 '''Push outgoing changesets (limited by revs) from a local
56 56 repository to remote. Return an integer:
57 57 - None means nothing to push
58 58 - 0 means HTTP error
59 59 - 1 means we pushed and remote head count is unchanged *or*
60 60 we have outgoing changesets but refused to push
61 61 - other values as described by addchangegroup()
62 62 '''
63 63 pushop = pushoperation(repo, remote, force, revs, newbranch)
64 64 if pushop.remote.local():
65 65 missing = (set(pushop.repo.requirements)
66 66 - pushop.remote.local().supported)
67 67 if missing:
68 68 msg = _("required features are not"
69 69 " supported in the destination:"
70 70 " %s") % (', '.join(sorted(missing)))
71 71 raise util.Abort(msg)
72 72
73 73 # there are two ways to push to remote repo:
74 74 #
75 75 # addchangegroup assumes local user can lock remote
76 76 # repo (local filesystem, old ssh servers).
77 77 #
78 78 # unbundle assumes local user cannot lock remote repo (new ssh
79 79 # servers, http servers).
80 80
81 81 if not pushop.remote.canpush():
82 82 raise util.Abort(_("destination does not support push"))
83 83 # get local lock as we might write phase data
84 84 locallock = None
85 85 try:
86 86 locallock = pushop.repo.lock()
87 87 pushop.locallocked = True
88 88 except IOError, err:
89 89 pushop.locallocked = False
90 90 if err.errno != errno.EACCES:
91 91 raise
92 92 # source repo cannot be locked.
93 93 # We do not abort the push, but just disable the local phase
94 94 # synchronisation.
95 95 msg = 'cannot lock source repository: %s\n' % err
96 96 pushop.ui.debug(msg)
97 97 try:
98 98 pushop.repo.checkpush(pushop)
99 99 lock = None
100 100 unbundle = pushop.remote.capable('unbundle')
101 101 if not unbundle:
102 102 lock = pushop.remote.lock()
103 103 try:
104 104 _pushdiscovery(pushop)
105 105 if _pushcheckoutgoing(pushop):
106 106 _pushchangeset(pushop)
107 107 _pushcomputecommonheads(pushop)
108 108 _pushsyncphase(pushop)
109 109 _pushobsolete(pushop)
110 110 finally:
111 111 if lock is not None:
112 112 lock.release()
113 113 finally:
114 114 if locallock is not None:
115 115 locallock.release()
116 116
117 117 _pushbookmark(pushop)
118 118 return pushop.ret
119 119
120 120 def _pushdiscovery(pushop):
121 121 # discovery
122 122 unfi = pushop.repo.unfiltered()
123 123 fci = discovery.findcommonincoming
124 124 commoninc = fci(unfi, pushop.remote, force=pushop.force)
125 125 common, inc, remoteheads = commoninc
126 126 fco = discovery.findcommonoutgoing
127 127 outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
128 128 commoninc=commoninc, force=pushop.force)
129 129 pushop.outgoing = outgoing
130 130 pushop.remoteheads = remoteheads
131 131 pushop.incoming = inc
132 132
133 133 def _pushcheckoutgoing(pushop):
134 134 outgoing = pushop.outgoing
135 135 unfi = pushop.repo.unfiltered()
136 136 if not outgoing.missing:
137 137 # nothing to push
138 138 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
139 139 return False
140 140 # something to push
141 141 if not pushop.force:
142 142 # if repo.obsstore == False --> no obsolete markers,
143 143 # so we can skip the iteration
144 144 if unfi.obsstore:
145 145 # these messages are here for 80-char-limit reasons
146 146 mso = _("push includes obsolete changeset: %s!")
147 147 mst = "push includes %s changeset: %s!"
148 148 # plain versions for i18n tool to detect them
149 149 _("push includes unstable changeset: %s!")
150 150 _("push includes bumped changeset: %s!")
151 151 _("push includes divergent changeset: %s!")
152 152 # If there is at least one obsolete or unstable
153 153 # changeset in missing, then at least one of the
154 154 # missing heads will be obsolete or unstable. So
155 155 # checking heads only is ok
156 156 for node in outgoing.missingheads:
157 157 ctx = unfi[node]
158 158 if ctx.obsolete():
159 159 raise util.Abort(mso % ctx)
160 160 elif ctx.troubled():
161 161 raise util.Abort(_(mst)
162 162 % (ctx.troubles()[0],
163 163 ctx))
164 164 newbm = pushop.ui.configlist('bookmarks', 'pushing')
165 165 discovery.checkheads(unfi, pushop.remote, outgoing,
166 166 pushop.remoteheads,
167 167 pushop.newbranch,
168 168 bool(pushop.incoming),
169 169 newbm)
170 170 return True
171 171
172 172 def _pushchangeset(pushop):
173 173 """Make the actual push of changeset bundle to remote repo"""
174 174 outgoing = pushop.outgoing
175 175 unbundle = pushop.remote.capable('unbundle')
176 176 # TODO: get bundlecaps from remote
177 177 bundlecaps = None
178 178 # create a changegroup from local
179 179 if pushop.revs is None and not (outgoing.excluded
180 180 or pushop.repo.changelog.filteredrevs):
181 181 # push everything,
182 182 # use the fast path, no race possible on push
183 183 bundler = changegroup.bundle10(pushop.repo, bundlecaps)
184 184 cg = changegroup.getsubset(pushop.repo,
185 185 outgoing,
186 186 bundler,
187 187 'push',
188 188 fastpath=True)
189 189 else:
190 190 cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
191 191 bundlecaps)
192 192
193 193 # apply changegroup to remote
194 194 if unbundle:
195 195 # local repo finds heads on server, finds out what
196 196 # revs it must push. once revs transferred, if server
197 197 # finds it has different heads (someone else won
198 198 # commit/push race), server aborts.
199 199 if pushop.force:
200 200 remoteheads = ['force']
201 201 else:
202 202 remoteheads = pushop.remoteheads
203 203 # ssh: return remote's addchangegroup()
204 204 # http: return remote's addchangegroup() or 0 for error
205 205 pushop.ret = pushop.remote.unbundle(cg, remoteheads,
206 206 'push')
207 207 else:
208 208 # we return an integer indicating remote head count
209 209 # change
210 210 pushop.ret = pushop.remote.addchangegroup(cg, 'push',
211 211 pushop.repo.url())
212 212
213 213 def _pushcomputecommonheads(pushop):
214 214 unfi = pushop.repo.unfiltered()
215 215 if pushop.ret:
216 216 # push succeeded, synchronize target of the push
217 217 cheads = pushop.outgoing.missingheads
218 218 elif pushop.revs is None:
219 219 # All-out push failed; synchronize all common
220 220 cheads = pushop.outgoing.commonheads
221 221 else:
222 222 # I want cheads = heads(::missingheads and ::commonheads)
223 223 # (missingheads is revs with secret changeset filtered out)
224 224 #
225 225 # This can be expressed as:
226 226 # cheads = ( (missingheads and ::commonheads)
227 227 # + (commonheads and ::missingheads))"
228 228 # )
229 229 #
230 230 # while trying to push we already computed the following:
231 231 # common = (::commonheads)
232 232 # missing = ((commonheads::missingheads) - commonheads)
233 233 #
234 234 # We can pick:
235 235 # * missingheads part of common (::commonheads)
236 236 common = set(pushop.outgoing.common)
237 237 nm = pushop.repo.changelog.nodemap
238 238 cheads = [node for node in pushop.revs if nm[node] in common]
239 239 # and
240 240 # * commonheads parents on missing
241 241 revset = unfi.set('%ln and parents(roots(%ln))',
242 242 pushop.outgoing.commonheads,
243 243 pushop.outgoing.missing)
244 244 cheads.extend(c.node() for c in revset)
245 245 pushop.commonheads = cheads
246 246
247 247 def _pushsyncphase(pushop):
248 248 """synchronise phase information locally and remotly"""
249 249 unfi = pushop.repo.unfiltered()
250 250 cheads = pushop.commonheads
251 251 if pushop.ret:
252 252 # push succeeded, synchronize target of the push
253 253 cheads = pushop.outgoing.missingheads
254 254 elif pushop.revs is None:
255 255 # All-out push failed; synchronize all common
256 256 cheads = pushop.outgoing.commonheads
257 257 else:
258 258 # I want cheads = heads(::missingheads and ::commonheads)
259 259 # (missingheads is revs with secret changeset filtered out)
260 260 #
261 261 # This can be expressed as:
262 262 # cheads = ( (missingheads and ::commonheads)
263 263 # + (commonheads and ::missingheads))"
264 264 # )
265 265 #
266 266 # while trying to push we already computed the following:
267 267 # common = (::commonheads)
268 268 # missing = ((commonheads::missingheads) - commonheads)
269 269 #
270 270 # We can pick:
271 271 # * missingheads part of common (::commonheads)
272 272 common = set(pushop.outgoing.common)
273 273 nm = pushop.repo.changelog.nodemap
274 274 cheads = [node for node in pushop.revs if nm[node] in common]
275 275 # and
276 276 # * commonheads parents on missing
277 277 revset = unfi.set('%ln and parents(roots(%ln))',
278 278 pushop.outgoing.commonheads,
279 279 pushop.outgoing.missing)
280 280 cheads.extend(c.node() for c in revset)
281 281 pushop.commonheads = cheads
282 282 # even when we don't push, exchanging phase data is useful
283 283 remotephases = pushop.remote.listkeys('phases')
284 284 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
285 285 and remotephases # server supports phases
286 286 and pushop.ret is None # nothing was pushed
287 287 and remotephases.get('publishing', False)):
288 288 # When:
289 289 # - this is a subrepo push
290 290 # - and remote supports phases
291 291 # - and no changeset was pushed
292 292 # - and remote is publishing
293 293 # We may be in issue 3871 case!
294 294 # We drop the possible phase synchronisation done by
295 295 # courtesy to publish changesets possibly locally draft
296 296 # on the remote.
297 297 remotephases = {'publishing': 'True'}
298 298 if not remotephases: # old server or public only rer
299 299 _localphasemove(pushop, cheads)
300 300 # don't push any phase data as there is nothing to push
301 301 else:
302 302 ana = phases.analyzeremotephases(pushop.repo, cheads,
303 303 remotephases)
304 304 pheads, droots = ana
305 305 ### Apply remote phase on local
306 306 if remotephases.get('publishing', False):
307 307 _localphasemove(pushop, cheads)
308 308 else: # publish = False
309 309 _localphasemove(pushop, pheads)
310 310 _localphasemove(pushop, cheads, phases.draft)
311 311 ### Apply local phase on remote
312 312
313 313 # Get the list of all revs draft on remote but public here.
314 314 # XXX Beware that the revset breaks if droots is not strictly
315 315 # XXX roots; we may want to ensure it is, but that is costly
316 316 outdated = unfi.set('heads((%ln::%ln) and public())',
317 317 droots, cheads)
318 318 for newremotehead in outdated:
319 319 r = pushop.remote.pushkey('phases',
320 320 newremotehead.hex(),
321 321 str(phases.draft),
322 322 str(phases.public))
323 323 if not r:
324 324 pushop.ui.warn(_('updating %s to public failed!\n')
325 325 % newremotehead)
326 326
327 327 def _localphasemove(pushop, nodes, phase=phases.public):
328 328 """move <nodes> to <phase> in the local source repo"""
329 329 if pushop.locallocked:
330 330 phases.advanceboundary(pushop.repo, phase, nodes)
331 331 else:
332 332 # repo is not locked, do not change any phases!
333 333 # Inform the user that phases should have been moved when
334 334 # applicable.
335 335 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
336 336 phasestr = phases.phasenames[phase]
337 337 if actualmoves:
338 338 pushop.ui.status(_('cannot lock source repo, skipping '
339 339 'local %s phase update\n') % phasestr)
340 340
341 341 def _pushobsolete(pushop):
342 342 """utility function to push obsolete markers to a remote"""
343 343 pushop.ui.debug('try to push obsolete markers to remote\n')
344 344 repo = pushop.repo
345 345 remote = pushop.remote
346 346 if (obsolete._enabled and repo.obsstore and
347 347 'obsolete' in remote.listkeys('namespaces')):
348 348 rslts = []
349 349 remotedata = repo.listkeys('obsolete')
350 350 for key in sorted(remotedata, reverse=True):
351 351 # reverse sort to ensure we end with dump0
352 352 data = remotedata[key]
353 353 rslts.append(remote.pushkey('obsolete', key, '', data))
354 354 if [r for r in rslts if not r]:
355 355 msg = _('failed to push some obsolete markers!\n')
356 356 repo.ui.warn(msg)
357 357
358 358 def _pushbookmark(pushop):
359 359 """Update bookmark position on remote"""
360 360 ui = pushop.ui
361 361 repo = pushop.repo.unfiltered()
362 362 remote = pushop.remote
363 363 ui.debug("checking for updated bookmarks\n")
364 364 revnums = map(repo.changelog.rev, pushop.revs or [])
365 365 ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
366 366 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
367 367 ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
368 368 srchex=hex)
369 369
370 370 for b, scid, dcid in advsrc:
371 371 if ancestors and repo[scid].rev() not in ancestors:
372 372 continue
373 373 if remote.pushkey('bookmarks', b, dcid, scid):
374 374 ui.status(_("updating bookmark %s\n") % b)
375 375 else:
376 376 ui.warn(_('updating bookmark %s failed!\n') % b)
377 377
378 378 class pulloperation(object):
379 379 """A object that represent a single pull operation
380 380
381 381 It purpose is to carry push related state and very common operation.
382 382
383 383 A new should be created at the begining of each pull and discarded
384 384 afterward.
385 385 """
386 386
387 387 def __init__(self, repo, remote, heads=None, force=False):
388 388 # repo we pull into
389 389 self.repo = repo
390 390 # repo we pull from
391 391 self.remote = remote
392 392 # revisions we try to pull (None means "all")
393 393 self.heads = heads
394 394 # do we force pull?
395 395 self.force = force
396 396 # the name of the pull transaction
397 397 self._trname = 'pull\n' + util.hidepassword(remote.url())
398 398 # hold the transaction once created
399 399 self._tr = None
400 400 # set of common changesets between local and remote before pull
401 401 self.common = None
402 402 # set of pulled heads
403 403 self.rheads = None
404 404 # list of missing changesets to fetch remotely
405 405 self.fetch = None
406 406 # result of changegroup pulling (used as return code by pull)
407 407 self.cgresult = None
408 408 # list of steps remaining to do (related to future bundle2 usage)
409 409 self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])
410 410
411 411 @util.propertycache
412 412 def pulledsubset(self):
413 413 """heads of the set of changeset target by the pull"""
414 414 # compute target subset
415 415 if self.heads is None:
416 416 # We pulled everything possible
417 417 # sync on everything common
418 418 c = set(self.common)
419 419 ret = list(self.common)
420 420 for n in self.rheads:
421 421 if n not in c:
422 422 ret.append(n)
423 423 return ret
424 424 else:
425 425 # We pulled a specific subset
426 426 # sync on this subset
427 427 return self.heads
428 428
429 429 def gettransaction(self):
430 430 """get appropriate pull transaction, creating it if needed"""
431 431 if self._tr is None:
432 432 self._tr = self.repo.transaction(self._trname)
433 433 return self._tr
434 434
435 435 def closetransaction(self):
436 436 """close transaction if created"""
437 437 if self._tr is not None:
438 438 self._tr.close()
439 439
440 440 def releasetransaction(self):
441 441 """release transaction if created"""
442 442 if self._tr is not None:
443 443 self._tr.release()
444 444
445 445 def pull(repo, remote, heads=None, force=False):
446 446 pullop = pulloperation(repo, remote, heads, force)
447 447 if pullop.remote.local():
448 448 missing = set(pullop.remote.requirements) - pullop.repo.supported
449 449 if missing:
450 450 msg = _("required features are not"
451 451 " supported in the destination:"
452 452 " %s") % (', '.join(sorted(missing)))
453 453 raise util.Abort(msg)
454 454
455 455 lock = pullop.repo.lock()
456 456 try:
457 457 _pulldiscovery(pullop)
458 458 if 'changegroup' in pullop.todosteps:
459 459 _pullchangeset(pullop)
460 460 if 'phases' in pullop.todosteps:
461 461 _pullphase(pullop)
462 462 if 'obsmarkers' in pullop.todosteps:
463 463 _pullobsolete(pullop)
464 464 pullop.closetransaction()
465 465 finally:
466 466 pullop.releasetransaction()
467 467 lock.release()
468 468
469 469 return pullop.cgresult
470 470
471 471 def _pulldiscovery(pullop):
472 472 """discovery phase for the pull
473 473
474 474 Currently handles changeset discovery only; it will handle all discovery
475 475 at some point."""
476 476 tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
477 477 pullop.remote,
478 478 heads=pullop.heads,
479 479 force=pullop.force)
480 480 pullop.common, pullop.fetch, pullop.rheads = tmp
481 481
482 482 def _pullchangeset(pullop):
483 483 """pull changeset from unbundle into the local repo"""
484 484 # We delay opening the transaction as late as possible so we
485 485 # don't open a transaction for nothing; otherwise we would break
486 486 # future useful rollback calls
487 487 pullop.todosteps.remove('changegroup')
488 488 if not pullop.fetch:
489 489 pullop.repo.ui.status(_("no changes found\n"))
490 490 pullop.cgresult = 0
491 491 return
492 492 pullop.gettransaction()
493 493 if pullop.heads is None and list(pullop.common) == [nullid]:
494 494 pullop.repo.ui.status(_("requesting all changes\n"))
495 495 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
496 496 # issue1320, avoid a race if remote changed after discovery
497 497 pullop.heads = pullop.rheads
498 498
499 499 if pullop.remote.capable('getbundle'):
500 500 # TODO: get bundlecaps from remote
501 501 cg = pullop.remote.getbundle('pull', common=pullop.common,
502 502 heads=pullop.heads or pullop.rheads)
503 503 elif pullop.heads is None:
504 504 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
505 505 elif not pullop.remote.capable('changegroupsubset'):
506 506 raise util.Abort(_("partial pull cannot be done because "
507 507 "other repository doesn't support "
508 508 "changegroupsubset."))
509 509 else:
510 510 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
511 511 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
512 512 pullop.remote.url())
513 513
514 514 def _pullphase(pullop):
515 515 # Get remote phases data from remote
516 516 pullop.todosteps.remove('phases')
517 517 remotephases = pullop.remote.listkeys('phases')
518 518 publishing = bool(remotephases.get('publishing', False))
519 519 if remotephases and not publishing:
520 520 # remote is new and non-publishing
521 521 pheads, _dr = phases.analyzeremotephases(pullop.repo,
522 522 pullop.pulledsubset,
523 523 remotephases)
524 524 phases.advanceboundary(pullop.repo, phases.public, pheads)
525 525 phases.advanceboundary(pullop.repo, phases.draft,
526 526 pullop.pulledsubset)
527 527 else:
528 528 # Remote is old or publishing; all common changesets
529 529 # should be seen as public
530 530 phases.advanceboundary(pullop.repo, phases.public,
531 531 pullop.pulledsubset)
532 532
533 533 def _pullobsolete(pullop):
534 534 """utility function to pull obsolete markers from a remote
535 535
536 536 The `gettransaction` is a function that returns the pull transaction, creating
537 537 one if necessary. We return the transaction to inform the calling code that
538 538 a new transaction has been created (when applicable).
539 539
540 540 Exists mostly to allow overriding for experimentation purposes"""
541 541 pullop.todosteps.remove('obsmarkers')
542 542 tr = None
543 543 if obsolete._enabled:
544 544 pullop.repo.ui.debug('fetching remote obsolete markers\n')
545 545 remoteobs = pullop.remote.listkeys('obsolete')
546 546 if 'dump0' in remoteobs:
547 547 tr = pullop.gettransaction()
548 548 for key in sorted(remoteobs, reverse=True):
549 549 if key.startswith('dump'):
550 550 data = base85.b85decode(remoteobs[key])
551 551 pullop.repo.obsstore.mergemarkers(tr, data)
552 552 pullop.repo.invalidatevolatilesets()
553 553 return tr
554 554
555 def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
556 """return a full bundle (with potentially multiple kind of parts)
557
558 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
559 passed. For now, the bundle can contain only changegroup, but this will
560 changes when more part type will be available for bundle2.
561
562 This is different from changegroup.getbundle that only returns an HG10
563 changegroup bundle. They may eventually get reunited in the future when we
564 have a clearer idea of the API we what to query different data.
565
566 The implementation is at a very early stage and will get massive rework
567 when the API of bundle is refined.
568 """
569 # build bundle here.
570 cg = changegroup.getbundle(repo, source, heads=heads,
571 common=common, bundlecaps=None)
572 if bundlecaps is None or 'HG20' not in bundlecaps:
573 return cg
574 # very crude first implementation,
575 # the bundle API will change and the generation will be done lazily.
576 bundler = bundle2.bundle20(repo.ui)
577 tempname = changegroup.writebundle(cg, None, 'HG10UN')
578 data = open(tempname).read()
579 part = bundle2.part('changegroup', data=data)
580 bundler.addpart(part)
581 temp = cStringIO.StringIO()
582 for c in bundler.getchunks():
583 temp.write(c)
584 temp.seek(0)
585 return bundle2.unbundle20(repo.ui, temp)
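As the localrepo.py hunk below shows, localpeer.getbundle is rewired to call the new exchange.getbundle, so code pulling from a local peer may now receive either a plain HG10 changegroup or a bundle2.unbundle20 stream. A hedged sketch of how a caller might dispatch on the two result types; the helper name applybundle is illustrative and not part of this patch, and full bundle2 application is not wired up at this stage of the series:

from mercurial import changegroup, bundle2

def applybundle(repo, bundle):
    # HG10 changegroups can be applied directly, exactly as before.
    if not isinstance(bundle, bundle2.unbundle20):
        return changegroup.addchangegroup(repo, bundle, 'pull',
                                          'bundle:example')
    # HG20 streams need bundle2-aware processing, which is still being
    # built; callers at this revision should not request 'HG20' unless
    # they know how to consume the result.
    raise NotImplementedError('bundle2 application is not available yet')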
@@ -1,1870 +1,1870 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock as lockmod
12 12 import transaction, store, encoding, exchange
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 import branchmap, pathutil
20 20 propertycache = util.propertycache
21 21 filecache = scmutil.filecache
22 22
23 23 class repofilecache(filecache):
24 24 """All filecache usage on repo are done for logic that should be unfiltered
25 25 """
26 26
27 27 def __get__(self, repo, type=None):
28 28 return super(repofilecache, self).__get__(repo.unfiltered(), type)
29 29 def __set__(self, repo, value):
30 30 return super(repofilecache, self).__set__(repo.unfiltered(), value)
31 31 def __delete__(self, repo):
32 32 return super(repofilecache, self).__delete__(repo.unfiltered())
33 33
34 34 class storecache(repofilecache):
35 35 """filecache for files in the store"""
36 36 def join(self, obj, fname):
37 37 return obj.sjoin(fname)
38 38
39 39 class unfilteredpropertycache(propertycache):
40 40 """propertycache that apply to unfiltered repo only"""
41 41
42 42 def __get__(self, repo, type=None):
43 43 unfi = repo.unfiltered()
44 44 if unfi is repo:
45 45 return super(unfilteredpropertycache, self).__get__(unfi)
46 46 return getattr(unfi, self.name)
47 47
48 48 class filteredpropertycache(propertycache):
49 49 """propertycache that must take filtering in account"""
50 50
51 51 def cachevalue(self, obj, value):
52 52 object.__setattr__(obj, self.name, value)
53 53
54 54
55 55 def hasunfilteredcache(repo, name):
56 56 """check if a repo has an unfilteredpropertycache value for <name>"""
57 57 return name in vars(repo.unfiltered())
58 58
59 59 def unfilteredmethod(orig):
60 60 """decorate method that always need to be run on unfiltered version"""
61 61 def wrapper(repo, *args, **kwargs):
62 62 return orig(repo.unfiltered(), *args, **kwargs)
63 63 return wrapper
64 64
65 65 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
66 66 legacycaps = moderncaps.union(set(['changegroupsubset']))
67 67
68 68 class localpeer(peer.peerrepository):
69 69 '''peer for a local repo; reflects only the most recent API'''
70 70
71 71 def __init__(self, repo, caps=moderncaps):
72 72 peer.peerrepository.__init__(self)
73 73 self._repo = repo.filtered('served')
74 74 self.ui = repo.ui
75 75 self._caps = repo._restrictcapabilities(caps)
76 76 self.requirements = repo.requirements
77 77 self.supportedformats = repo.supportedformats
78 78
79 79 def close(self):
80 80 self._repo.close()
81 81
82 82 def _capabilities(self):
83 83 return self._caps
84 84
85 85 def local(self):
86 86 return self._repo
87 87
88 88 def canpush(self):
89 89 return True
90 90
91 91 def url(self):
92 92 return self._repo.url()
93 93
94 94 def lookup(self, key):
95 95 return self._repo.lookup(key)
96 96
97 97 def branchmap(self):
98 98 return self._repo.branchmap()
99 99
100 100 def heads(self):
101 101 return self._repo.heads()
102 102
103 103 def known(self, nodes):
104 104 return self._repo.known(nodes)
105 105
106 106 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
107 107 format='HG10'):
108 return changegroup.getbundle(self._repo, source, heads=heads,
108 return exchange.getbundle(self._repo, source, heads=heads,
109 109 common=common, bundlecaps=bundlecaps)
110 110
111 111 # TODO We might want to move the next two calls into legacypeer and add
112 112 # unbundle instead.
113 113
114 114 def lock(self):
115 115 return self._repo.lock()
116 116
117 117 def addchangegroup(self, cg, source, url):
118 118 return changegroup.addchangegroup(self._repo, cg, source, url)
119 119
120 120 def pushkey(self, namespace, key, old, new):
121 121 return self._repo.pushkey(namespace, key, old, new)
122 122
123 123 def listkeys(self, namespace):
124 124 return self._repo.listkeys(namespace)
125 125
126 126 def debugwireargs(self, one, two, three=None, four=None, five=None):
127 127 '''used to test argument passing over the wire'''
128 128 return "%s %s %s %s %s" % (one, two, three, four, five)
129 129
130 130 class locallegacypeer(localpeer):
131 131 '''peer extension which implements legacy methods too; used for tests with
132 132 restricted capabilities'''
133 133
134 134 def __init__(self, repo):
135 135 localpeer.__init__(self, repo, caps=legacycaps)
136 136
137 137 def branches(self, nodes):
138 138 return self._repo.branches(nodes)
139 139
140 140 def between(self, pairs):
141 141 return self._repo.between(pairs)
142 142
143 143 def changegroup(self, basenodes, source):
144 144 return changegroup.changegroup(self._repo, basenodes, source)
145 145
146 146 def changegroupsubset(self, bases, heads, source):
147 147 return changegroup.changegroupsubset(self._repo, bases, heads, source)
148 148
149 149 class localrepository(object):
150 150
151 151 supportedformats = set(('revlogv1', 'generaldelta'))
152 152 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
153 153 'dotencode'))
154 154 openerreqs = set(('revlogv1', 'generaldelta'))
155 155 requirements = ['revlogv1']
156 156 filtername = None
157 157
158 158 # a list of (ui, featureset) functions.
159 159 # only functions defined in modules of enabled extensions are invoked
160 160 featuresetupfuncs = set()
161 161
162 162 def _baserequirements(self, create):
163 163 return self.requirements[:]
164 164
165 165 def __init__(self, baseui, path=None, create=False):
166 166 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
167 167 self.wopener = self.wvfs
168 168 self.root = self.wvfs.base
169 169 self.path = self.wvfs.join(".hg")
170 170 self.origroot = path
171 171 self.auditor = pathutil.pathauditor(self.root, self._checknested)
172 172 self.vfs = scmutil.vfs(self.path)
173 173 self.opener = self.vfs
174 174 self.baseui = baseui
175 175 self.ui = baseui.copy()
176 176 self.ui.copy = baseui.copy # prevent copying repo configuration
177 177 # A list of callbacks to shape the phase if no data were found.
178 178 # Callbacks are in the form: func(repo, roots) --> processed root.
179 179 # This list is to be filled by extensions during repo setup
180 180 self._phasedefaults = []
181 181 try:
182 182 self.ui.readconfig(self.join("hgrc"), self.root)
183 183 extensions.loadall(self.ui)
184 184 except IOError:
185 185 pass
186 186
187 187 if self.featuresetupfuncs:
188 188 self.supported = set(self._basesupported) # use private copy
189 189 extmods = set(m.__name__ for n, m
190 190 in extensions.extensions(self.ui))
191 191 for setupfunc in self.featuresetupfuncs:
192 192 if setupfunc.__module__ in extmods:
193 193 setupfunc(self.ui, self.supported)
194 194 else:
195 195 self.supported = self._basesupported
196 196
197 197 if not self.vfs.isdir():
198 198 if create:
199 199 if not self.wvfs.exists():
200 200 self.wvfs.makedirs()
201 201 self.vfs.makedir(notindexed=True)
202 202 requirements = self._baserequirements(create)
203 203 if self.ui.configbool('format', 'usestore', True):
204 204 self.vfs.mkdir("store")
205 205 requirements.append("store")
206 206 if self.ui.configbool('format', 'usefncache', True):
207 207 requirements.append("fncache")
208 208 if self.ui.configbool('format', 'dotencode', True):
209 209 requirements.append('dotencode')
210 210 # create an invalid changelog
211 211 self.vfs.append(
212 212 "00changelog.i",
213 213 '\0\0\0\2' # represents revlogv2
214 214 ' dummy changelog to prevent using the old repo layout'
215 215 )
216 216 if self.ui.configbool('format', 'generaldelta', False):
217 217 requirements.append("generaldelta")
218 218 requirements = set(requirements)
219 219 else:
220 220 raise error.RepoError(_("repository %s not found") % path)
221 221 elif create:
222 222 raise error.RepoError(_("repository %s already exists") % path)
223 223 else:
224 224 try:
225 225 requirements = scmutil.readrequires(self.vfs, self.supported)
226 226 except IOError, inst:
227 227 if inst.errno != errno.ENOENT:
228 228 raise
229 229 requirements = set()
230 230
231 231 self.sharedpath = self.path
232 232 try:
233 233 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
234 234 realpath=True)
235 235 s = vfs.base
236 236 if not vfs.exists():
237 237 raise error.RepoError(
238 238 _('.hg/sharedpath points to nonexistent directory %s') % s)
239 239 self.sharedpath = s
240 240 except IOError, inst:
241 241 if inst.errno != errno.ENOENT:
242 242 raise
243 243
244 244 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
245 245 self.spath = self.store.path
246 246 self.svfs = self.store.vfs
247 247 self.sopener = self.svfs
248 248 self.sjoin = self.store.join
249 249 self.vfs.createmode = self.store.createmode
250 250 self._applyrequirements(requirements)
251 251 if create:
252 252 self._writerequirements()
253 253
254 254
255 255 self._branchcaches = {}
256 256 self.filterpats = {}
257 257 self._datafilters = {}
258 258 self._transref = self._lockref = self._wlockref = None
259 259
260 260 # A cache for various files under .hg/ that tracks file changes,
261 261 # (used by the filecache decorator)
262 262 #
263 263 # Maps a property name to its util.filecacheentry
264 264 self._filecache = {}
265 265
266 266 # hold sets of revisions to be filtered
267 267 # should be cleared when something might have changed the filter value:
268 268 # - new changesets,
269 269 # - phase change,
270 270 # - new obsolescence marker,
271 271 # - working directory parent change,
272 272 # - bookmark changes
273 273 self.filteredrevcache = {}
274 274
275 275 def close(self):
276 276 pass
277 277
278 278 def _restrictcapabilities(self, caps):
279 279 return caps
280 280
281 281 def _applyrequirements(self, requirements):
282 282 self.requirements = requirements
283 283 self.sopener.options = dict((r, 1) for r in requirements
284 284 if r in self.openerreqs)
285 285 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
286 286 if chunkcachesize is not None:
287 287 self.sopener.options['chunkcachesize'] = chunkcachesize
288 288
289 289 def _writerequirements(self):
290 290 reqfile = self.opener("requires", "w")
291 291 for r in sorted(self.requirements):
292 292 reqfile.write("%s\n" % r)
293 293 reqfile.close()
294 294
295 295 def _checknested(self, path):
296 296 """Determine if path is a legal nested repository."""
297 297 if not path.startswith(self.root):
298 298 return False
299 299 subpath = path[len(self.root) + 1:]
300 300 normsubpath = util.pconvert(subpath)
301 301
302 302 # XXX: Checking against the current working copy is wrong in
303 303 # the sense that it can reject things like
304 304 #
305 305 # $ hg cat -r 10 sub/x.txt
306 306 #
307 307 # if sub/ is no longer a subrepository in the working copy
308 308 # parent revision.
309 309 #
310 310 # However, it can of course also allow things that would have
311 311 # been rejected before, such as the above cat command if sub/
312 312 # is a subrepository now, but was a normal directory before.
313 313 # The old path auditor would have rejected by mistake since it
314 314 # panics when it sees sub/.hg/.
315 315 #
316 316 # All in all, checking against the working copy seems sensible
317 317 # since we want to prevent access to nested repositories on
318 318 # the filesystem *now*.
319 319 ctx = self[None]
320 320 parts = util.splitpath(subpath)
321 321 while parts:
322 322 prefix = '/'.join(parts)
323 323 if prefix in ctx.substate:
324 324 if prefix == normsubpath:
325 325 return True
326 326 else:
327 327 sub = ctx.sub(prefix)
328 328 return sub.checknested(subpath[len(prefix) + 1:])
329 329 else:
330 330 parts.pop()
331 331 return False
332 332
333 333 def peer(self):
334 334 return localpeer(self) # not cached to avoid reference cycle
335 335
336 336 def unfiltered(self):
337 337 """Return unfiltered version of the repository
338 338
339 339 Intended to be overwritten by filtered repo."""
340 340 return self
341 341
342 342 def filtered(self, name):
343 343 """Return a filtered version of a repository"""
344 344 # build a new class with the mixin and the current class
345 345 # (possibly subclass of the repo)
346 346 class proxycls(repoview.repoview, self.unfiltered().__class__):
347 347 pass
348 348 return proxycls(self, name)
349 349
350 350 @repofilecache('bookmarks')
351 351 def _bookmarks(self):
352 352 return bookmarks.bmstore(self)
353 353
354 354 @repofilecache('bookmarks.current')
355 355 def _bookmarkcurrent(self):
356 356 return bookmarks.readcurrent(self)
357 357
358 358 def bookmarkheads(self, bookmark):
359 359 name = bookmark.split('@', 1)[0]
360 360 heads = []
361 361 for mark, n in self._bookmarks.iteritems():
362 362 if mark.split('@', 1)[0] == name:
363 363 heads.append(n)
364 364 return heads
365 365
366 366 @storecache('phaseroots')
367 367 def _phasecache(self):
368 368 return phases.phasecache(self, self._phasedefaults)
369 369
370 370 @storecache('obsstore')
371 371 def obsstore(self):
372 372 store = obsolete.obsstore(self.sopener)
373 373 if store and not obsolete._enabled:
374 374 # message is rare enough to not be translated
375 375 msg = 'obsolete feature not enabled but %i markers found!\n'
376 376 self.ui.warn(msg % len(list(store)))
377 377 return store
378 378
379 379 @storecache('00changelog.i')
380 380 def changelog(self):
381 381 c = changelog.changelog(self.sopener)
382 382 if 'HG_PENDING' in os.environ:
383 383 p = os.environ['HG_PENDING']
384 384 if p.startswith(self.root):
385 385 c.readpending('00changelog.i.a')
386 386 return c
387 387
388 388 @storecache('00manifest.i')
389 389 def manifest(self):
390 390 return manifest.manifest(self.sopener)
391 391
392 392 @repofilecache('dirstate')
393 393 def dirstate(self):
394 394 warned = [0]
395 395 def validate(node):
396 396 try:
397 397 self.changelog.rev(node)
398 398 return node
399 399 except error.LookupError:
400 400 if not warned[0]:
401 401 warned[0] = True
402 402 self.ui.warn(_("warning: ignoring unknown"
403 403 " working parent %s!\n") % short(node))
404 404 return nullid
405 405
406 406 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
407 407
408 408 def __getitem__(self, changeid):
409 409 if changeid is None:
410 410 return context.workingctx(self)
411 411 return context.changectx(self, changeid)
412 412
413 413 def __contains__(self, changeid):
414 414 try:
415 415 return bool(self.lookup(changeid))
416 416 except error.RepoLookupError:
417 417 return False
418 418
419 419 def __nonzero__(self):
420 420 return True
421 421
422 422 def __len__(self):
423 423 return len(self.changelog)
424 424
425 425 def __iter__(self):
426 426 return iter(self.changelog)
427 427
428 428 def revs(self, expr, *args):
429 429 '''Return a list of revisions matching the given revset'''
430 430 expr = revset.formatspec(expr, *args)
431 431 m = revset.match(None, expr)
432 432 return m(self, revset.spanset(self))
433 433
434 434 def set(self, expr, *args):
435 435 '''
436 436 Yield a context for each matching revision, after doing arg
437 437 replacement via revset.formatspec
438 438 '''
439 439 for r in self.revs(expr, *args):
440 440 yield self[r]
441 441
442 442 def url(self):
443 443 return 'file:' + self.root
444 444
445 445 def hook(self, name, throw=False, **args):
446 446 return hook.hook(self.ui, self, name, throw, **args)
447 447
448 448 @unfilteredmethod
449 449 def _tag(self, names, node, message, local, user, date, extra={}):
450 450 if isinstance(names, str):
451 451 names = (names,)
452 452
453 453 branches = self.branchmap()
454 454 for name in names:
455 455 self.hook('pretag', throw=True, node=hex(node), tag=name,
456 456 local=local)
457 457 if name in branches:
458 458 self.ui.warn(_("warning: tag %s conflicts with existing"
459 459 " branch name\n") % name)
460 460
461 461 def writetags(fp, names, munge, prevtags):
462 462 fp.seek(0, 2)
463 463 if prevtags and prevtags[-1] != '\n':
464 464 fp.write('\n')
465 465 for name in names:
466 466 m = munge and munge(name) or name
467 467 if (self._tagscache.tagtypes and
468 468 name in self._tagscache.tagtypes):
469 469 old = self.tags().get(name, nullid)
470 470 fp.write('%s %s\n' % (hex(old), m))
471 471 fp.write('%s %s\n' % (hex(node), m))
472 472 fp.close()
473 473
474 474 prevtags = ''
475 475 if local:
476 476 try:
477 477 fp = self.opener('localtags', 'r+')
478 478 except IOError:
479 479 fp = self.opener('localtags', 'a')
480 480 else:
481 481 prevtags = fp.read()
482 482
483 483 # local tags are stored in the current charset
484 484 writetags(fp, names, None, prevtags)
485 485 for name in names:
486 486 self.hook('tag', node=hex(node), tag=name, local=local)
487 487 return
488 488
489 489 try:
490 490 fp = self.wfile('.hgtags', 'rb+')
491 491 except IOError, e:
492 492 if e.errno != errno.ENOENT:
493 493 raise
494 494 fp = self.wfile('.hgtags', 'ab')
495 495 else:
496 496 prevtags = fp.read()
497 497
498 498 # committed tags are stored in UTF-8
499 499 writetags(fp, names, encoding.fromlocal, prevtags)
500 500
501 501 fp.close()
502 502
503 503 self.invalidatecaches()
504 504
505 505 if '.hgtags' not in self.dirstate:
506 506 self[None].add(['.hgtags'])
507 507
508 508 m = matchmod.exact(self.root, '', ['.hgtags'])
509 509 tagnode = self.commit(message, user, date, extra=extra, match=m)
510 510
511 511 for name in names:
512 512 self.hook('tag', node=hex(node), tag=name, local=local)
513 513
514 514 return tagnode
515 515
516 516 def tag(self, names, node, message, local, user, date):
517 517 '''tag a revision with one or more symbolic names.
518 518
519 519 names is a list of strings or, when adding a single tag, names may be a
520 520 string.
521 521
522 522 if local is True, the tags are stored in a per-repository file.
523 523 otherwise, they are stored in the .hgtags file, and a new
524 524 changeset is committed with the change.
525 525
526 526 keyword arguments:
527 527
528 528 local: whether to store tags in non-version-controlled file
529 529 (default False)
530 530
531 531 message: commit message to use if committing
532 532
533 533 user: name of user to use if committing
534 534
535 535 date: date tuple to use if committing'''
536 536
537 537 if not local:
538 538 for x in self.status()[:5]:
539 539 if '.hgtags' in x:
540 540 raise util.Abort(_('working copy of .hgtags is changed '
541 541 '(please commit .hgtags manually)'))
542 542
543 543 self.tags() # instantiate the cache
544 544 self._tag(names, node, message, local, user, date)
545 545
546 546 @filteredpropertycache
547 547 def _tagscache(self):
548 548 '''Returns a tagscache object that contains various tags related
549 549 caches.'''
550 550
551 551 # This simplifies its cache management by having one decorated
552 552 # function (this one) and the rest simply fetch things from it.
553 553 class tagscache(object):
554 554 def __init__(self):
555 555 # These two define the set of tags for this repository. tags
556 556 # maps tag name to node; tagtypes maps tag name to 'global' or
557 557 # 'local'. (Global tags are defined by .hgtags across all
558 558 # heads, and local tags are defined in .hg/localtags.)
559 559 # They constitute the in-memory cache of tags.
560 560 self.tags = self.tagtypes = None
561 561
562 562 self.nodetagscache = self.tagslist = None
563 563
564 564 cache = tagscache()
565 565 cache.tags, cache.tagtypes = self._findtags()
566 566
567 567 return cache
568 568
569 569 def tags(self):
570 570 '''return a mapping of tag to node'''
571 571 t = {}
572 572 if self.changelog.filteredrevs:
573 573 tags, tt = self._findtags()
574 574 else:
575 575 tags = self._tagscache.tags
576 576 for k, v in tags.iteritems():
577 577 try:
578 578 # ignore tags to unknown nodes
579 579 self.changelog.rev(v)
580 580 t[k] = v
581 581 except (error.LookupError, ValueError):
582 582 pass
583 583 return t
584 584
585 585 def _findtags(self):
586 586 '''Do the hard work of finding tags. Return a pair of dicts
587 587 (tags, tagtypes) where tags maps tag name to node, and tagtypes
588 588 maps tag name to a string like \'global\' or \'local\'.
589 589 Subclasses or extensions are free to add their own tags, but
590 590 should be aware that the returned dicts will be retained for the
591 591 duration of the localrepo object.'''
592 592
593 593 # XXX what tagtype should subclasses/extensions use? Currently
594 594 # mq and bookmarks add tags, but do not set the tagtype at all.
595 595 # Should each extension invent its own tag type? Should there
596 596 # be one tagtype for all such "virtual" tags? Or is the status
597 597 # quo fine?
598 598
599 599 alltags = {} # map tag name to (node, hist)
600 600 tagtypes = {}
601 601
602 602 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
603 603 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
604 604
605 605 # Build the return dicts. Have to re-encode tag names because
606 606 # the tags module always uses UTF-8 (in order not to lose info
607 607 # writing to the cache), but the rest of Mercurial wants them in
608 608 # local encoding.
609 609 tags = {}
610 610 for (name, (node, hist)) in alltags.iteritems():
611 611 if node != nullid:
612 612 tags[encoding.tolocal(name)] = node
613 613 tags['tip'] = self.changelog.tip()
614 614 tagtypes = dict([(encoding.tolocal(name), value)
615 615 for (name, value) in tagtypes.iteritems()])
616 616 return (tags, tagtypes)
617 617
618 618 def tagtype(self, tagname):
619 619 '''
620 620 return the type of the given tag. result can be:
621 621
622 622 'local' : a local tag
623 623 'global' : a global tag
624 624 None : tag does not exist
625 625 '''
626 626
627 627 return self._tagscache.tagtypes.get(tagname)
628 628
629 629 def tagslist(self):
630 630 '''return a list of tags ordered by revision'''
631 631 if not self._tagscache.tagslist:
632 632 l = []
633 633 for t, n in self.tags().iteritems():
634 634 r = self.changelog.rev(n)
635 635 l.append((r, t, n))
636 636 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
637 637
638 638 return self._tagscache.tagslist
639 639
640 640 def nodetags(self, node):
641 641 '''return the tags associated with a node'''
642 642 if not self._tagscache.nodetagscache:
643 643 nodetagscache = {}
644 644 for t, n in self._tagscache.tags.iteritems():
645 645 nodetagscache.setdefault(n, []).append(t)
646 646 for tags in nodetagscache.itervalues():
647 647 tags.sort()
648 648 self._tagscache.nodetagscache = nodetagscache
649 649 return self._tagscache.nodetagscache.get(node, [])
650 650
651 651 def nodebookmarks(self, node):
652 652 marks = []
653 653 for bookmark, n in self._bookmarks.iteritems():
654 654 if n == node:
655 655 marks.append(bookmark)
656 656 return sorted(marks)
657 657
658 658 def branchmap(self):
659 659 '''returns a dictionary {branch: [branchheads]} with branchheads
660 660 ordered by increasing revision number'''
661 661 branchmap.updatecache(self)
662 662 return self._branchcaches[self.filtername]
663 663
664 664 def branchtip(self, branch):
665 665 '''return the tip node for a given branch'''
666 666 try:
667 667 return self.branchmap().branchtip(branch)
668 668 except KeyError:
669 669 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
670 670
671 671 def lookup(self, key):
672 672 return self[key].node()
673 673
674 674 def lookupbranch(self, key, remote=None):
675 675 repo = remote or self
676 676 if key in repo.branchmap():
677 677 return key
678 678
679 679 repo = (remote and remote.local()) and remote or self
680 680 return repo[key].branch()
681 681
682 682 def known(self, nodes):
683 683 nm = self.changelog.nodemap
684 684 pc = self._phasecache
685 685 result = []
686 686 for n in nodes:
687 687 r = nm.get(n)
688 688 resp = not (r is None or pc.phase(self, r) >= phases.secret)
689 689 result.append(resp)
690 690 return result
691 691
692 692 def local(self):
693 693 return self
694 694
695 695 def cancopy(self):
696 696 # so statichttprepo's override of local() works
697 697 if not self.local():
698 698 return False
699 699 if not self.ui.configbool('phases', 'publish', True):
700 700 return True
701 701 # if publishing we can't copy if there is filtered content
702 702 return not self.filtered('visible').changelog.filteredrevs
703 703
704 704 def join(self, f):
705 705 return os.path.join(self.path, f)
706 706
707 707 def wjoin(self, f):
708 708 return os.path.join(self.root, f)
709 709
710 710 def file(self, f):
711 711 if f[0] == '/':
712 712 f = f[1:]
713 713 return filelog.filelog(self.sopener, f)
714 714
715 715 def changectx(self, changeid):
716 716 return self[changeid]
717 717
718 718 def parents(self, changeid=None):
719 719 '''get list of changectxs for parents of changeid'''
720 720 return self[changeid].parents()
721 721
722 722 def setparents(self, p1, p2=nullid):
723 723 copies = self.dirstate.setparents(p1, p2)
724 724 pctx = self[p1]
725 725 if copies:
726 726 # Adjust copy records; the dirstate cannot do it, as it
727 727 # requires access to the parents' manifests. Preserve them
728 728 # only for entries added to the first parent.
729 729 for f in copies:
730 730 if f not in pctx and copies[f] in pctx:
731 731 self.dirstate.copy(copies[f], f)
732 732 if p2 == nullid:
733 733 for f, s in sorted(self.dirstate.copies().items()):
734 734 if f not in pctx and s not in pctx:
735 735 self.dirstate.copy(None, f)
736 736
737 737 def filectx(self, path, changeid=None, fileid=None):
738 738 """changeid can be a changeset revision, node, or tag.
739 739 fileid can be a file revision or node."""
740 740 return context.filectx(self, path, changeid, fileid)
741 741
742 742 def getcwd(self):
743 743 return self.dirstate.getcwd()
744 744
745 745 def pathto(self, f, cwd=None):
746 746 return self.dirstate.pathto(f, cwd)
747 747
748 748 def wfile(self, f, mode='r'):
749 749 return self.wopener(f, mode)
750 750
751 751 def _link(self, f):
752 752 return self.wvfs.islink(f)
753 753
754 754 def _loadfilter(self, filter):
755 755 if filter not in self.filterpats:
756 756 l = []
757 757 for pat, cmd in self.ui.configitems(filter):
758 758 if cmd == '!':
759 759 continue
760 760 mf = matchmod.match(self.root, '', [pat])
761 761 fn = None
762 762 params = cmd
763 763 for name, filterfn in self._datafilters.iteritems():
764 764 if cmd.startswith(name):
765 765 fn = filterfn
766 766 params = cmd[len(name):].lstrip()
767 767 break
768 768 if not fn:
769 769 fn = lambda s, c, **kwargs: util.filter(s, c)
770 770 # Wrap old filters not supporting keyword arguments
771 771 if not inspect.getargspec(fn)[2]:
772 772 oldfn = fn
773 773 fn = lambda s, c, **kwargs: oldfn(s, c)
774 774 l.append((mf, fn, params))
775 775 self.filterpats[filter] = l
776 776 return self.filterpats[filter]
777 777
778 778 def _filter(self, filterpats, filename, data):
779 779 for mf, fn, cmd in filterpats:
780 780 if mf(filename):
781 781 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
782 782 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
783 783 break
784 784
785 785 return data
786 786
787 787 @unfilteredpropertycache
788 788 def _encodefilterpats(self):
789 789 return self._loadfilter('encode')
790 790
791 791 @unfilteredpropertycache
792 792 def _decodefilterpats(self):
793 793 return self._loadfilter('decode')
794 794
795 795 def adddatafilter(self, name, filter):
796 796 self._datafilters[name] = filter
797 797
798 798 def wread(self, filename):
799 799 if self._link(filename):
800 800 data = self.wvfs.readlink(filename)
801 801 else:
802 802 data = self.wopener.read(filename)
803 803 return self._filter(self._encodefilterpats, filename, data)
804 804
805 805 def wwrite(self, filename, data, flags):
806 806 data = self._filter(self._decodefilterpats, filename, data)
807 807 if 'l' in flags:
808 808 self.wopener.symlink(data, filename)
809 809 else:
810 810 self.wopener.write(filename, data)
811 811 if 'x' in flags:
812 812 self.wvfs.setflags(filename, False, True)
813 813
814 814 def wwritedata(self, filename, data):
815 815 return self._filter(self._decodefilterpats, filename, data)
816 816
817 817 def transaction(self, desc, report=None):
818 818 tr = self._transref and self._transref() or None
819 819 if tr and tr.running():
820 820 return tr.nest()
821 821
822 822 # abort here if the journal already exists
823 823 if self.svfs.exists("journal"):
824 824 raise error.RepoError(
825 825 _("abandoned transaction found - run hg recover"))
826 826
827 827 def onclose():
828 828 self.store.write(tr)
829 829
830 830 self._writejournal(desc)
831 831 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
832 832 rp = report and report or self.ui.warn
833 833 tr = transaction.transaction(rp, self.sopener,
834 834 "journal",
835 835 aftertrans(renames),
836 836 self.store.createmode,
837 837 onclose)
838 838 self._transref = weakref.ref(tr)
839 839 return tr
840 840
841 841 def _journalfiles(self):
842 842 return ((self.svfs, 'journal'),
843 843 (self.vfs, 'journal.dirstate'),
844 844 (self.vfs, 'journal.branch'),
845 845 (self.vfs, 'journal.desc'),
846 846 (self.vfs, 'journal.bookmarks'),
847 847 (self.svfs, 'journal.phaseroots'))
848 848
849 849 def undofiles(self):
850 850 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
851 851
852 852 def _writejournal(self, desc):
853 853 self.opener.write("journal.dirstate",
854 854 self.opener.tryread("dirstate"))
855 855 self.opener.write("journal.branch",
856 856 encoding.fromlocal(self.dirstate.branch()))
857 857 self.opener.write("journal.desc",
858 858 "%d\n%s\n" % (len(self), desc))
859 859 self.opener.write("journal.bookmarks",
860 860 self.opener.tryread("bookmarks"))
861 861 self.sopener.write("journal.phaseroots",
862 862 self.sopener.tryread("phaseroots"))
863 863
864 864 def recover(self):
865 865 lock = self.lock()
866 866 try:
867 867 if self.svfs.exists("journal"):
868 868 self.ui.status(_("rolling back interrupted transaction\n"))
869 869 transaction.rollback(self.sopener, "journal",
870 870 self.ui.warn)
871 871 self.invalidate()
872 872 return True
873 873 else:
874 874 self.ui.warn(_("no interrupted transaction available\n"))
875 875 return False
876 876 finally:
877 877 lock.release()
878 878
879 879 def rollback(self, dryrun=False, force=False):
880 880 wlock = lock = None
881 881 try:
882 882 wlock = self.wlock()
883 883 lock = self.lock()
884 884 if self.svfs.exists("undo"):
885 885 return self._rollback(dryrun, force)
886 886 else:
887 887 self.ui.warn(_("no rollback information available\n"))
888 888 return 1
889 889 finally:
890 890 release(lock, wlock)
891 891
892 892 @unfilteredmethod # Until we get smarter cache management
893 893 def _rollback(self, dryrun, force):
894 894 ui = self.ui
895 895 try:
896 896 args = self.opener.read('undo.desc').splitlines()
897 897 (oldlen, desc, detail) = (int(args[0]), args[1], None)
898 898 if len(args) >= 3:
899 899 detail = args[2]
900 900 oldtip = oldlen - 1
901 901
902 902 if detail and ui.verbose:
903 903 msg = (_('repository tip rolled back to revision %s'
904 904 ' (undo %s: %s)\n')
905 905 % (oldtip, desc, detail))
906 906 else:
907 907 msg = (_('repository tip rolled back to revision %s'
908 908 ' (undo %s)\n')
909 909 % (oldtip, desc))
910 910 except IOError:
911 911 msg = _('rolling back unknown transaction\n')
912 912 desc = None
913 913
914 914 if not force and self['.'] != self['tip'] and desc == 'commit':
915 915 raise util.Abort(
916 916 _('rollback of last commit while not checked out '
917 917 'may lose data'), hint=_('use -f to force'))
918 918
919 919 ui.status(msg)
920 920 if dryrun:
921 921 return 0
922 922
923 923 parents = self.dirstate.parents()
924 924 self.destroying()
925 925 transaction.rollback(self.sopener, 'undo', ui.warn)
926 926 if self.vfs.exists('undo.bookmarks'):
927 927 self.vfs.rename('undo.bookmarks', 'bookmarks')
928 928 if self.svfs.exists('undo.phaseroots'):
929 929 self.svfs.rename('undo.phaseroots', 'phaseroots')
930 930 self.invalidate()
931 931
932 932 parentgone = (parents[0] not in self.changelog.nodemap or
933 933 parents[1] not in self.changelog.nodemap)
934 934 if parentgone:
935 935 self.vfs.rename('undo.dirstate', 'dirstate')
936 936 try:
937 937 branch = self.opener.read('undo.branch')
938 938 self.dirstate.setbranch(encoding.tolocal(branch))
939 939 except IOError:
940 940 ui.warn(_('named branch could not be reset: '
941 941 'current branch is still \'%s\'\n')
942 942 % self.dirstate.branch())
943 943
944 944 self.dirstate.invalidate()
945 945 parents = tuple([p.rev() for p in self.parents()])
946 946 if len(parents) > 1:
947 947 ui.status(_('working directory now based on '
948 948 'revisions %d and %d\n') % parents)
949 949 else:
950 950 ui.status(_('working directory now based on '
951 951 'revision %d\n') % parents)
952 952 # TODO: if we know which new heads may result from this rollback, pass
953 953 # them to destroy(), which will prevent the branchhead cache from being
954 954 # invalidated.
955 955 self.destroyed()
956 956 return 0
957 957
958 958 def invalidatecaches(self):
959 959
960 960 if '_tagscache' in vars(self):
961 961 # can't use delattr on proxy
962 962 del self.__dict__['_tagscache']
963 963
964 964 self.unfiltered()._branchcaches.clear()
965 965 self.invalidatevolatilesets()
966 966
967 967 def invalidatevolatilesets(self):
968 968 self.filteredrevcache.clear()
969 969 obsolete.clearobscaches(self)
970 970
971 971 def invalidatedirstate(self):
972 972 '''Invalidates the dirstate, causing the next call to dirstate
973 973 to check if it was modified since the last time it was read,
974 974 rereading it if it has.
975 975
976 976 This differs from dirstate.invalidate() in that it doesn't always
977 977 reread the dirstate. Use dirstate.invalidate() if you want to
978 978 explicitly read the dirstate again (i.e. restoring it to a previous
979 979 known good state).'''
980 980 if hasunfilteredcache(self, 'dirstate'):
981 981 for k in self.dirstate._filecache:
982 982 try:
983 983 delattr(self.dirstate, k)
984 984 except AttributeError:
985 985 pass
986 986 delattr(self.unfiltered(), 'dirstate')
987 987
988 988 def invalidate(self):
989 989 unfiltered = self.unfiltered() # all file caches are stored unfiltered
990 990 for k in self._filecache:
991 991 # dirstate is invalidated separately in invalidatedirstate()
992 992 if k == 'dirstate':
993 993 continue
994 994
995 995 try:
996 996 delattr(unfiltered, k)
997 997 except AttributeError:
998 998 pass
999 999 self.invalidatecaches()
1000 1000 self.store.invalidatecaches()
1001 1001
1002 1002 def invalidateall(self):
1003 1003 '''Fully invalidates both store and non-store parts, causing the
1004 1004 subsequent operation to reread any outside changes.'''
1005 1005 # extension should hook this to invalidate its caches
1006 1006 self.invalidate()
1007 1007 self.invalidatedirstate()
1008 1008
1009 1009 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1010 1010 try:
1011 1011 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1012 1012 except error.LockHeld, inst:
1013 1013 if not wait:
1014 1014 raise
1015 1015 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1016 1016 (desc, inst.locker))
1017 1017 # default to 600 seconds timeout
1018 1018 l = lockmod.lock(vfs, lockname,
1019 1019 int(self.ui.config("ui", "timeout", "600")),
1020 1020 releasefn, desc=desc)
1021 1021 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1022 1022 if acquirefn:
1023 1023 acquirefn()
1024 1024 return l
1025 1025
1026 1026 def _afterlock(self, callback):
1027 1027 """add a callback to the current repository lock.
1028 1028
1029 1029 The callback will be executed on lock release."""
1030 1030 l = self._lockref and self._lockref()
1031 1031 if l:
1032 1032 l.postrelease.append(callback)
1033 1033 else:
1034 1034 callback()
1035 1035
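# Usage sketch (editorial): _afterlock() defers work until the store lock is
# released, which is exactly how the commit hook is scheduled further down in
# commit(). A hedged example, assuming 'repo' is a localrepository instance:
#
#   def notify():
#       repo.ui.status('store lock released\n')
#
#   l = repo.lock()
#   try:
#       repo._afterlock(notify)    # queued on l.postrelease
#   finally:
#       l.release()                # notify() runs once the lock is dropped
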
1036 1036 def lock(self, wait=True):
1037 1037 '''Lock the repository store (.hg/store) and return a weak reference
1038 1038 to the lock. Use this before modifying the store (e.g. committing or
1039 1039 stripping). If you are opening a transaction, get a lock as well.'''
1040 1040 l = self._lockref and self._lockref()
1041 1041 if l is not None and l.held:
1042 1042 l.lock()
1043 1043 return l
1044 1044
1045 1045 def unlock():
1046 1046 if hasunfilteredcache(self, '_phasecache'):
1047 1047 self._phasecache.write()
1048 1048 for k, ce in self._filecache.items():
1049 1049 if k == 'dirstate' or k not in self.__dict__:
1050 1050 continue
1051 1051 ce.refresh()
1052 1052
1053 1053 l = self._lock(self.svfs, "lock", wait, unlock,
1054 1054 self.invalidate, _('repository %s') % self.origroot)
1055 1055 self._lockref = weakref.ref(l)
1056 1056 return l
1057 1057
1058 1058 def wlock(self, wait=True):
1059 1059 '''Lock the non-store parts of the repository (everything under
1060 1060 .hg except .hg/store) and return a weak reference to the lock.
1061 1061 Use this before modifying files in .hg.'''
1062 1062 l = self._wlockref and self._wlockref()
1063 1063 if l is not None and l.held:
1064 1064 l.lock()
1065 1065 return l
1066 1066
1067 1067 def unlock():
1068 1068 self.dirstate.write()
1069 1069 self._filecache['dirstate'].refresh()
1070 1070
1071 1071 l = self._lock(self.vfs, "wlock", wait, unlock,
1072 1072 self.invalidatedirstate, _('working directory of %s') %
1073 1073 self.origroot)
1074 1074 self._wlockref = weakref.ref(l)
1075 1075 return l
1076 1076
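# Usage sketch (editorial): when both locks are needed, the working-directory
# lock is taken before the store lock, mirroring rollback() above. 'repo' is
# assumed to be a localrepository instance:
#
#   wlock = lock = None
#   try:
#       wlock = repo.wlock()    # protects .hg outside the store
#       lock = repo.lock()      # protects .hg/store
#       # ... modify the working copy and the store ...
#   finally:
#       release(lock, wlock)    # release() tolerates None, as in rollback()
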
1077 1077 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1078 1078 """
1079 1079 commit an individual file as part of a larger transaction
1080 1080 """
1081 1081
1082 1082 fname = fctx.path()
1083 1083 text = fctx.data()
1084 1084 flog = self.file(fname)
1085 1085 fparent1 = manifest1.get(fname, nullid)
1086 1086 fparent2 = fparent2o = manifest2.get(fname, nullid)
1087 1087
1088 1088 meta = {}
1089 1089 copy = fctx.renamed()
1090 1090 if copy and copy[0] != fname:
1091 1091 # Mark the new revision of this file as a copy of another
1092 1092 # file. This copy data will effectively act as a parent
1093 1093 # of this new revision. If this is a merge, the first
1094 1094 # parent will be the nullid (meaning "look up the copy data")
1095 1095 # and the second one will be the other parent. For example:
1096 1096 #
1097 1097 # 0 --- 1 --- 3 rev1 changes file foo
1098 1098 # \ / rev2 renames foo to bar and changes it
1099 1099 # \- 2 -/ rev3 should have bar with all changes and
1100 1100 # should record that bar descends from
1101 1101 # bar in rev2 and foo in rev1
1102 1102 #
1103 1103 # this allows this merge to succeed:
1104 1104 #
1105 1105 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1106 1106 # \ / merging rev3 and rev4 should use bar@rev2
1107 1107 # \- 2 --- 4 as the merge base
1108 1108 #
1109 1109
1110 1110 cfname = copy[0]
1111 1111 crev = manifest1.get(cfname)
1112 1112 newfparent = fparent2
1113 1113
1114 1114 if manifest2: # branch merge
1115 1115 if fparent2 == nullid or crev is None: # copied on remote side
1116 1116 if cfname in manifest2:
1117 1117 crev = manifest2[cfname]
1118 1118 newfparent = fparent1
1119 1119
1120 1120 # find source in nearest ancestor if we've lost track
1121 1121 if not crev:
1122 1122 self.ui.debug(" %s: searching for copy revision for %s\n" %
1123 1123 (fname, cfname))
1124 1124 for ancestor in self[None].ancestors():
1125 1125 if cfname in ancestor:
1126 1126 crev = ancestor[cfname].filenode()
1127 1127 break
1128 1128
1129 1129 if crev:
1130 1130 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1131 1131 meta["copy"] = cfname
1132 1132 meta["copyrev"] = hex(crev)
1133 1133 fparent1, fparent2 = nullid, newfparent
1134 1134 else:
1135 1135 self.ui.warn(_("warning: can't find ancestor for '%s' "
1136 1136 "copied from '%s'!\n") % (fname, cfname))
1137 1137
1138 1138 elif fparent1 == nullid:
1139 1139 fparent1, fparent2 = fparent2, nullid
1140 1140 elif fparent2 != nullid:
1141 1141 # is one parent an ancestor of the other?
1142 1142 fparentancestor = flog.ancestor(fparent1, fparent2)
1143 1143 if fparentancestor == fparent1:
1144 1144 fparent1, fparent2 = fparent2, nullid
1145 1145 elif fparentancestor == fparent2:
1146 1146 fparent2 = nullid
1147 1147
1148 1148 # is the file changed?
1149 1149 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1150 1150 changelist.append(fname)
1151 1151 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1152 1152
1153 1153 # are just the flags changed during merge?
1154 1154 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1155 1155 changelist.append(fname)
1156 1156
1157 1157 return fparent1
1158 1158
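# Worked example (editorial) for the copy/rename bookkeeping in _filecommit()
# above: when 'bar' is recorded as a copy of 'foo', the new filelog revision
# of 'bar' carries metadata along the lines of
#
#   meta = {'copy': 'foo',            # path of the copy source
#           'copyrev': hex(crev)}     # filelog node the data was copied from
#
# and its first parent is set to nullid so readers know to follow the copy
# metadata instead (see fparent1, fparent2 = nullid, newfparent above).
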
1159 1159 @unfilteredmethod
1160 1160 def commit(self, text="", user=None, date=None, match=None, force=False,
1161 1161 editor=False, extra={}):
1162 1162 """Add a new revision to current repository.
1163 1163
1164 1164 Revision information is gathered from the working directory,
1165 1165 match can be used to filter the committed files. If editor is
1166 1166 supplied, it is called to get a commit message.
1167 1167 """
1168 1168
1169 1169 def fail(f, msg):
1170 1170 raise util.Abort('%s: %s' % (f, msg))
1171 1171
1172 1172 if not match:
1173 1173 match = matchmod.always(self.root, '')
1174 1174
1175 1175 if not force:
1176 1176 vdirs = []
1177 1177 match.explicitdir = vdirs.append
1178 1178 match.bad = fail
1179 1179
1180 1180 wlock = self.wlock()
1181 1181 try:
1182 1182 wctx = self[None]
1183 1183 merge = len(wctx.parents()) > 1
1184 1184
1185 1185 if (not force and merge and match and
1186 1186 (match.files() or match.anypats())):
1187 1187 raise util.Abort(_('cannot partially commit a merge '
1188 1188 '(do not specify files or patterns)'))
1189 1189
1190 1190 changes = self.status(match=match, clean=force)
1191 1191 if force:
1192 1192 changes[0].extend(changes[6]) # mq may commit unchanged files
1193 1193
1194 1194 # check subrepos
1195 1195 subs = []
1196 1196 commitsubs = set()
1197 1197 newstate = wctx.substate.copy()
1198 1198 # only manage subrepos and .hgsubstate if .hgsub is present
1199 1199 if '.hgsub' in wctx:
1200 1200 # we'll decide whether to track this ourselves, thanks
1201 1201 for c in changes[:3]:
1202 1202 if '.hgsubstate' in c:
1203 1203 c.remove('.hgsubstate')
1204 1204
1205 1205 # compare current state to last committed state
1206 1206 # build new substate based on last committed state
1207 1207 oldstate = wctx.p1().substate
1208 1208 for s in sorted(newstate.keys()):
1209 1209 if not match(s):
1210 1210 # ignore working copy, use old state if present
1211 1211 if s in oldstate:
1212 1212 newstate[s] = oldstate[s]
1213 1213 continue
1214 1214 if not force:
1215 1215 raise util.Abort(
1216 1216 _("commit with new subrepo %s excluded") % s)
1217 1217 if wctx.sub(s).dirty(True):
1218 1218 if not self.ui.configbool('ui', 'commitsubrepos'):
1219 1219 raise util.Abort(
1220 1220 _("uncommitted changes in subrepo %s") % s,
1221 1221 hint=_("use --subrepos for recursive commit"))
1222 1222 subs.append(s)
1223 1223 commitsubs.add(s)
1224 1224 else:
1225 1225 bs = wctx.sub(s).basestate()
1226 1226 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1227 1227 if oldstate.get(s, (None, None, None))[1] != bs:
1228 1228 subs.append(s)
1229 1229
1230 1230 # check for removed subrepos
1231 1231 for p in wctx.parents():
1232 1232 r = [s for s in p.substate if s not in newstate]
1233 1233 subs += [s for s in r if match(s)]
1234 1234 if subs:
1235 1235 if (not match('.hgsub') and
1236 1236 '.hgsub' in (wctx.modified() + wctx.added())):
1237 1237 raise util.Abort(
1238 1238 _("can't commit subrepos without .hgsub"))
1239 1239 changes[0].insert(0, '.hgsubstate')
1240 1240
1241 1241 elif '.hgsub' in changes[2]:
1242 1242 # clean up .hgsubstate when .hgsub is removed
1243 1243 if ('.hgsubstate' in wctx and
1244 1244 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1245 1245 changes[2].insert(0, '.hgsubstate')
1246 1246
1247 1247 # make sure all explicit patterns are matched
1248 1248 if not force and match.files():
1249 1249 matched = set(changes[0] + changes[1] + changes[2])
1250 1250
1251 1251 for f in match.files():
1252 1252 f = self.dirstate.normalize(f)
1253 1253 if f == '.' or f in matched or f in wctx.substate:
1254 1254 continue
1255 1255 if f in changes[3]: # missing
1256 1256 fail(f, _('file not found!'))
1257 1257 if f in vdirs: # visited directory
1258 1258 d = f + '/'
1259 1259 for mf in matched:
1260 1260 if mf.startswith(d):
1261 1261 break
1262 1262 else:
1263 1263 fail(f, _("no match under directory!"))
1264 1264 elif f not in self.dirstate:
1265 1265 fail(f, _("file not tracked!"))
1266 1266
1267 1267 cctx = context.workingctx(self, text, user, date, extra, changes)
1268 1268
1269 1269 if (not force and not extra.get("close") and not merge
1270 1270 and not cctx.files()
1271 1271 and wctx.branch() == wctx.p1().branch()):
1272 1272 return None
1273 1273
1274 1274 if merge and cctx.deleted():
1275 1275 raise util.Abort(_("cannot commit merge with missing files"))
1276 1276
1277 1277 ms = mergemod.mergestate(self)
1278 1278 for f in changes[0]:
1279 1279 if f in ms and ms[f] == 'u':
1280 1280 raise util.Abort(_("unresolved merge conflicts "
1281 1281 "(see hg help resolve)"))
1282 1282
1283 1283 if editor:
1284 1284 cctx._text = editor(self, cctx, subs)
1285 1285 edited = (text != cctx._text)
1286 1286
1287 1287 # Save commit message in case this transaction gets rolled back
1288 1288 # (e.g. by a pretxncommit hook). Leave the content alone on
1289 1289 # the assumption that the user will use the same editor again.
1290 1290 msgfn = self.savecommitmessage(cctx._text)
1291 1291
1292 1292 # commit subs and write new state
1293 1293 if subs:
1294 1294 for s in sorted(commitsubs):
1295 1295 sub = wctx.sub(s)
1296 1296 self.ui.status(_('committing subrepository %s\n') %
1297 1297 subrepo.subrelpath(sub))
1298 1298 sr = sub.commit(cctx._text, user, date)
1299 1299 newstate[s] = (newstate[s][0], sr)
1300 1300 subrepo.writestate(self, newstate)
1301 1301
1302 1302 p1, p2 = self.dirstate.parents()
1303 1303 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1304 1304 try:
1305 1305 self.hook("precommit", throw=True, parent1=hookp1,
1306 1306 parent2=hookp2)
1307 1307 ret = self.commitctx(cctx, True)
1308 1308 except: # re-raises
1309 1309 if edited:
1310 1310 self.ui.write(
1311 1311 _('note: commit message saved in %s\n') % msgfn)
1312 1312 raise
1313 1313
1314 1314 # update bookmarks, dirstate and mergestate
1315 1315 bookmarks.update(self, [p1, p2], ret)
1316 1316 cctx.markcommitted(ret)
1317 1317 ms.reset()
1318 1318 finally:
1319 1319 wlock.release()
1320 1320
1321 1321 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1322 1322 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1323 1323 self._afterlock(commithook)
1324 1324 return ret
1325 1325
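# Usage sketch (editorial): a plain commit through the method above, assuming
# 'repo' has pending working-directory changes; user and date are illustrative
# values. commit() returns the new changelog node, or None when there is
# nothing to commit.
#
#   node = repo.commit(text='fix parser crash',
#                      user='Jane Doe <jane@example.com>',
#                      date='2014-03-01 12:00 +0000')
#   if node is None:
#       repo.ui.status('nothing changed\n')
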
1326 1326 @unfilteredmethod
1327 1327 def commitctx(self, ctx, error=False):
1328 1328 """Add a new revision to current repository.
1329 1329 Revision information is passed via the context argument.
1330 1330 """
1331 1331
1332 1332 tr = lock = None
1333 1333 removed = list(ctx.removed())
1334 1334 p1, p2 = ctx.p1(), ctx.p2()
1335 1335 user = ctx.user()
1336 1336
1337 1337 lock = self.lock()
1338 1338 try:
1339 1339 tr = self.transaction("commit")
1340 1340 trp = weakref.proxy(tr)
1341 1341
1342 1342 if ctx.files():
1343 1343 m1 = p1.manifest().copy()
1344 1344 m2 = p2.manifest()
1345 1345
1346 1346 # check in files
1347 1347 new = {}
1348 1348 changed = []
1349 1349 linkrev = len(self)
1350 1350 for f in sorted(ctx.modified() + ctx.added()):
1351 1351 self.ui.note(f + "\n")
1352 1352 try:
1353 1353 fctx = ctx[f]
1354 1354 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1355 1355 changed)
1356 1356 m1.set(f, fctx.flags())
1357 1357 except OSError, inst:
1358 1358 self.ui.warn(_("trouble committing %s!\n") % f)
1359 1359 raise
1360 1360 except IOError, inst:
1361 1361 errcode = getattr(inst, 'errno', errno.ENOENT)
1362 1362 if error or errcode and errcode != errno.ENOENT:
1363 1363 self.ui.warn(_("trouble committing %s!\n") % f)
1364 1364 raise
1365 1365 else:
1366 1366 removed.append(f)
1367 1367
1368 1368 # update manifest
1369 1369 m1.update(new)
1370 1370 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1371 1371 drop = [f for f in removed if f in m1]
1372 1372 for f in drop:
1373 1373 del m1[f]
1374 1374 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1375 1375 p2.manifestnode(), (new, drop))
1376 1376 files = changed + removed
1377 1377 else:
1378 1378 mn = p1.manifestnode()
1379 1379 files = []
1380 1380
1381 1381 # update changelog
1382 1382 self.changelog.delayupdate()
1383 1383 n = self.changelog.add(mn, files, ctx.description(),
1384 1384 trp, p1.node(), p2.node(),
1385 1385 user, ctx.date(), ctx.extra().copy())
1386 1386 p = lambda: self.changelog.writepending() and self.root or ""
1387 1387 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1388 1388 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1389 1389 parent2=xp2, pending=p)
1390 1390 self.changelog.finalize(trp)
1391 1391 # set the new commit in its proper phase
1392 1392 targetphase = subrepo.newcommitphase(self.ui, ctx)
1393 1393 if targetphase:
1394 1394 # retracting the boundary does not alter parent changesets.
1395 1395 # if a parent has a higher phase, the resulting phase will
1396 1396 # be compliant anyway
1397 1397 #
1398 1398 # if the minimal phase was 0 we don't need to retract anything
1399 1399 phases.retractboundary(self, targetphase, [n])
1400 1400 tr.close()
1401 1401 branchmap.updatecache(self.filtered('served'))
1402 1402 return n
1403 1403 finally:
1404 1404 if tr:
1405 1405 tr.release()
1406 1406 lock.release()
1407 1407
1408 1408 @unfilteredmethod
1409 1409 def destroying(self):
1410 1410 '''Inform the repository that nodes are about to be destroyed.
1411 1411 Intended for use by strip and rollback, so there's a common
1412 1412 place for anything that has to be done before destroying history.
1413 1413
1414 1414 This is mostly useful for saving state that is in memory and waiting
1415 1415 to be flushed when the current lock is released. Because a call to
1416 1416 destroyed is imminent, the repo will be invalidated, causing those
1417 1417 changes either to stay in memory (waiting for the next unlock) or to
1418 1418 vanish completely.
1419 1419 '''
1420 1420 # When using the same lock to commit and strip, the phasecache is left
1421 1421 # dirty after committing. Then when we strip, the repo is invalidated,
1422 1422 # causing those changes to disappear.
1423 1423 if '_phasecache' in vars(self):
1424 1424 self._phasecache.write()
1425 1425
1426 1426 @unfilteredmethod
1427 1427 def destroyed(self):
1428 1428 '''Inform the repository that nodes have been destroyed.
1429 1429 Intended for use by strip and rollback, so there's a common
1430 1430 place for anything that has to be done after destroying history.
1431 1431 '''
1432 1432 # When one tries to:
1433 1433 # 1) destroy nodes thus calling this method (e.g. strip)
1434 1434 # 2) use phasecache somewhere (e.g. commit)
1435 1435 #
1436 1436 # then 2) will fail because the phasecache contains nodes that were
1437 1437 # removed. We can either remove phasecache from the filecache,
1438 1438 # causing it to reload next time it is accessed, or simply filter
1439 1439 # the removed nodes now and write the updated cache.
1440 1440 self._phasecache.filterunknown(self)
1441 1441 self._phasecache.write()
1442 1442
1443 1443 # update the 'served' branch cache to help read-only server processes.
1444 1444 # Thanks to branchcache collaboration, this is done from the nearest
1445 1445 # filtered subset and it is expected to be fast.
1446 1446 branchmap.updatecache(self.filtered('served'))
1447 1447
1448 1448 # Ensure the persistent tag cache is updated. Doing it now
1449 1449 # means that the tag cache only has to worry about destroyed
1450 1450 # heads immediately after a strip/rollback. That in turn
1451 1451 # guarantees that "cachetip == currenttip" (comparing both rev
1452 1452 # and node) always means no nodes have been added or destroyed.
1453 1453
1454 1454 # XXX this is suboptimal when qrefresh'ing: we strip the current
1455 1455 # head, refresh the tag cache, then immediately add a new head.
1456 1456 # But I think doing it this way is necessary for the "instant
1457 1457 # tag cache retrieval" case to work.
1458 1458 self.invalidate()
1459 1459
1460 1460 def walk(self, match, node=None):
1461 1461 '''
1462 1462 walk recursively through the directory tree or a given
1463 1463 changeset, finding all files matched by the match
1464 1464 function
1465 1465 '''
1466 1466 return self[node].walk(match)
1467 1467
1468 1468 def status(self, node1='.', node2=None, match=None,
1469 1469 ignored=False, clean=False, unknown=False,
1470 1470 listsubrepos=False):
1471 1471 """return status of files between two nodes or node and working
1472 1472 directory.
1473 1473
1474 1474 If node1 is None, use the first dirstate parent instead.
1475 1475 If node2 is None, compare node1 with working directory.
1476 1476 """
1477 1477
1478 1478 def mfmatches(ctx):
1479 1479 mf = ctx.manifest().copy()
1480 1480 if match.always():
1481 1481 return mf
1482 1482 for fn in mf.keys():
1483 1483 if not match(fn):
1484 1484 del mf[fn]
1485 1485 return mf
1486 1486
1487 1487 ctx1 = self[node1]
1488 1488 ctx2 = self[node2]
1489 1489
1490 1490 working = ctx2.rev() is None
1491 1491 parentworking = working and ctx1 == self['.']
1492 1492 match = match or matchmod.always(self.root, self.getcwd())
1493 1493 listignored, listclean, listunknown = ignored, clean, unknown
1494 1494
1495 1495 # load earliest manifest first for caching reasons
1496 1496 if not working and ctx2.rev() < ctx1.rev():
1497 1497 ctx2.manifest()
1498 1498
1499 1499 if not parentworking:
1500 1500 def bad(f, msg):
1501 1501 # 'f' may be a directory pattern from 'match.files()',
1502 1502 # so 'f not in ctx1' is not enough
1503 1503 if f not in ctx1 and f not in ctx1.dirs():
1504 1504 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1505 1505 match.bad = bad
1506 1506
1507 1507 if working: # we need to scan the working dir
1508 1508 subrepos = []
1509 1509 if '.hgsub' in self.dirstate:
1510 1510 subrepos = sorted(ctx2.substate)
1511 1511 s = self.dirstate.status(match, subrepos, listignored,
1512 1512 listclean, listunknown)
1513 1513 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1514 1514
1515 1515 # check for any possibly clean files
1516 1516 if parentworking and cmp:
1517 1517 fixup = []
1518 1518 # do a full compare of any files that might have changed
1519 1519 for f in sorted(cmp):
1520 1520 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1521 1521 or ctx1[f].cmp(ctx2[f])):
1522 1522 modified.append(f)
1523 1523 else:
1524 1524 fixup.append(f)
1525 1525
1526 1526 # update dirstate for files that are actually clean
1527 1527 if fixup:
1528 1528 if listclean:
1529 1529 clean += fixup
1530 1530
1531 1531 try:
1532 1532 # updating the dirstate is optional
1533 1533 # so we don't wait on the lock
1534 1534 wlock = self.wlock(False)
1535 1535 try:
1536 1536 for f in fixup:
1537 1537 self.dirstate.normal(f)
1538 1538 finally:
1539 1539 wlock.release()
1540 1540 except error.LockError:
1541 1541 pass
1542 1542
1543 1543 if not parentworking:
1544 1544 mf1 = mfmatches(ctx1)
1545 1545 if working:
1546 1546 # we are comparing working dir against non-parent
1547 1547 # generate a pseudo-manifest for the working dir
1548 1548 mf2 = mfmatches(self['.'])
1549 1549 for f in cmp + modified + added:
1550 1550 mf2[f] = None
1551 1551 mf2.set(f, ctx2.flags(f))
1552 1552 for f in removed:
1553 1553 if f in mf2:
1554 1554 del mf2[f]
1555 1555 else:
1556 1556 # we are comparing two revisions
1557 1557 deleted, unknown, ignored = [], [], []
1558 1558 mf2 = mfmatches(ctx2)
1559 1559
1560 1560 modified, added, clean = [], [], []
1561 1561 withflags = mf1.withflags() | mf2.withflags()
1562 1562 for fn, mf2node in mf2.iteritems():
1563 1563 if fn in mf1:
1564 1564 if (fn not in deleted and
1565 1565 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1566 1566 (mf1[fn] != mf2node and
1567 1567 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1568 1568 modified.append(fn)
1569 1569 elif listclean:
1570 1570 clean.append(fn)
1571 1571 del mf1[fn]
1572 1572 elif fn not in deleted:
1573 1573 added.append(fn)
1574 1574 removed = mf1.keys()
1575 1575
1576 1576 if working and modified and not self.dirstate._checklink:
1577 1577 # Symlink placeholders may get non-symlink-like contents
1578 1578 # via user error or dereferencing by NFS or Samba servers,
1579 1579 # so we filter out any placeholders that don't look like a
1580 1580 # symlink
1581 1581 sane = []
1582 1582 for f in modified:
1583 1583 if ctx2.flags(f) == 'l':
1584 1584 d = ctx2[f].data()
1585 1585 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1586 1586 self.ui.debug('ignoring suspect symlink placeholder'
1587 1587 ' "%s"\n' % f)
1588 1588 continue
1589 1589 sane.append(f)
1590 1590 modified = sane
1591 1591
1592 1592 r = modified, added, removed, deleted, unknown, ignored, clean
1593 1593
1594 1594 if listsubrepos:
1595 1595 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1596 1596 if working:
1597 1597 rev2 = None
1598 1598 else:
1599 1599 rev2 = ctx2.substate[subpath][1]
1600 1600 try:
1601 1601 submatch = matchmod.narrowmatcher(subpath, match)
1602 1602 s = sub.status(rev2, match=submatch, ignored=listignored,
1603 1603 clean=listclean, unknown=listunknown,
1604 1604 listsubrepos=True)
1605 1605 for rfiles, sfiles in zip(r, s):
1606 1606 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1607 1607 except error.LookupError:
1608 1608 self.ui.status(_("skipping missing subrepository: %s\n")
1609 1609 % subpath)
1610 1610
1611 1611 for l in r:
1612 1612 l.sort()
1613 1613 return r
1614 1614
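# Usage sketch (editorial): unpacking the seven lists returned by status()
# above. By default only the first four are populated; pass unknown=True,
# ignored=True and/or clean=True to fill in the remaining ones.
#
#   modified, added, removed, deleted, unknown, ignored, clean = \
#       repo.status(unknown=True, ignored=True, clean=True)
#   for f in modified:
#       repo.ui.write('M %s\n' % f)
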
1615 1615 def heads(self, start=None):
1616 1616 heads = self.changelog.heads(start)
1617 1617 # sort the output in rev descending order
1618 1618 return sorted(heads, key=self.changelog.rev, reverse=True)
1619 1619
1620 1620 def branchheads(self, branch=None, start=None, closed=False):
1621 1621 '''return a (possibly filtered) list of heads for the given branch
1622 1622
1623 1623 Heads are returned in topological order, from newest to oldest.
1624 1624 If branch is None, use the dirstate branch.
1625 1625 If start is not None, return only heads reachable from start.
1626 1626 If closed is True, return heads that are marked as closed as well.
1627 1627 '''
1628 1628 if branch is None:
1629 1629 branch = self[None].branch()
1630 1630 branches = self.branchmap()
1631 1631 if branch not in branches:
1632 1632 return []
1633 1633 # the cache returns heads ordered lowest to highest
1634 1634 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1635 1635 if start is not None:
1636 1636 # filter out the heads that cannot be reached from startrev
1637 1637 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1638 1638 bheads = [h for h in bheads if h in fbheads]
1639 1639 return bheads
1640 1640
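# Usage sketch (editorial): listing the heads of a named branch with the
# method above; 'default' is only an example branch name. Heads come back
# newest first, and closed heads are skipped unless closed=True is passed.
#
#   for node in repo.branchheads('default', closed=True):
#       repo.ui.write('%s\n' % hex(node))
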
1641 1641 def branches(self, nodes):
1642 1642 if not nodes:
1643 1643 nodes = [self.changelog.tip()]
1644 1644 b = []
1645 1645 for n in nodes:
1646 1646 t = n
1647 1647 while True:
1648 1648 p = self.changelog.parents(n)
1649 1649 if p[1] != nullid or p[0] == nullid:
1650 1650 b.append((t, n, p[0], p[1]))
1651 1651 break
1652 1652 n = p[0]
1653 1653 return b
1654 1654
1655 1655 def between(self, pairs):
1656 1656 r = []
1657 1657
1658 1658 for top, bottom in pairs:
1659 1659 n, l, i = top, [], 0
1660 1660 f = 1
1661 1661
1662 1662 while n != bottom and n != nullid:
1663 1663 p = self.changelog.parents(n)[0]
1664 1664 if i == f:
1665 1665 l.append(n)
1666 1666 f = f * 2
1667 1667 n = p
1668 1668 i += 1
1669 1669
1670 1670 r.append(l)
1671 1671
1672 1672 return r
1673 1673
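# Worked example (editorial) for between() above: walking first parents from
# 'top' toward 'bottom', a node is sampled whenever the step count hits the
# next power of two, so for a linear history the result holds the nodes at
# distances 1, 2, 4, 8, ... from top, i.e. roughly log2(n) samples, which is
# what the wire-protocol discovery needs to bisect efficiently.
#
#   # tipnode and basenode are placeholder binary node ids
#   samples = repo.between([(tipnode, basenode)])[0]
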
1674 1674 def pull(self, remote, heads=None, force=False):
1675 1675 return exchange.pull(self, remote, heads, force)
1676 1676
1677 1677 def checkpush(self, pushop):
1678 1678 """Extensions can override this function if additional checks have
1679 1679 to be performed before pushing, or call it if they override push
1680 1680 command.
1681 1681 """
1682 1682 pass
1683 1683
1684 1684 def push(self, remote, force=False, revs=None, newbranch=False):
1685 1685 return exchange.push(self, remote, force, revs, newbranch)
1686 1686
1687 1687 def stream_in(self, remote, requirements):
1688 1688 lock = self.lock()
1689 1689 try:
1690 1690 # Save remote branchmap. We will use it later
1691 1691 # to speed up branchcache creation
1692 1692 rbranchmap = None
1693 1693 if remote.capable("branchmap"):
1694 1694 rbranchmap = remote.branchmap()
1695 1695
1696 1696 fp = remote.stream_out()
1697 1697 l = fp.readline()
1698 1698 try:
1699 1699 resp = int(l)
1700 1700 except ValueError:
1701 1701 raise error.ResponseError(
1702 1702 _('unexpected response from remote server:'), l)
1703 1703 if resp == 1:
1704 1704 raise util.Abort(_('operation forbidden by server'))
1705 1705 elif resp == 2:
1706 1706 raise util.Abort(_('locking the remote repository failed'))
1707 1707 elif resp != 0:
1708 1708 raise util.Abort(_('the server sent an unknown error code'))
1709 1709 self.ui.status(_('streaming all changes\n'))
1710 1710 l = fp.readline()
1711 1711 try:
1712 1712 total_files, total_bytes = map(int, l.split(' ', 1))
1713 1713 except (ValueError, TypeError):
1714 1714 raise error.ResponseError(
1715 1715 _('unexpected response from remote server:'), l)
1716 1716 self.ui.status(_('%d files to transfer, %s of data\n') %
1717 1717 (total_files, util.bytecount(total_bytes)))
1718 1718 handled_bytes = 0
1719 1719 self.ui.progress(_('clone'), 0, total=total_bytes)
1720 1720 start = time.time()
1721 1721
1722 1722 tr = self.transaction(_('clone'))
1723 1723 try:
1724 1724 for i in xrange(total_files):
1725 1725 # XXX doesn't support '\n' or '\r' in filenames
1726 1726 l = fp.readline()
1727 1727 try:
1728 1728 name, size = l.split('\0', 1)
1729 1729 size = int(size)
1730 1730 except (ValueError, TypeError):
1731 1731 raise error.ResponseError(
1732 1732 _('unexpected response from remote server:'), l)
1733 1733 if self.ui.debugflag:
1734 1734 self.ui.debug('adding %s (%s)\n' %
1735 1735 (name, util.bytecount(size)))
1736 1736 # for backwards compat, name was partially encoded
1737 1737 ofp = self.sopener(store.decodedir(name), 'w')
1738 1738 for chunk in util.filechunkiter(fp, limit=size):
1739 1739 handled_bytes += len(chunk)
1740 1740 self.ui.progress(_('clone'), handled_bytes,
1741 1741 total=total_bytes)
1742 1742 ofp.write(chunk)
1743 1743 ofp.close()
1744 1744 tr.close()
1745 1745 finally:
1746 1746 tr.release()
1747 1747
1748 1748 # Writing straight to files circumvented the in-memory caches
1749 1749 self.invalidate()
1750 1750
1751 1751 elapsed = time.time() - start
1752 1752 if elapsed <= 0:
1753 1753 elapsed = 0.001
1754 1754 self.ui.progress(_('clone'), None)
1755 1755 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1756 1756 (util.bytecount(total_bytes), elapsed,
1757 1757 util.bytecount(total_bytes / elapsed)))
1758 1758
1759 1759 # new requirements = old non-format requirements +
1760 1760 # new format-related requirements
1761 1761 # from the streamed-in repository
1762 1762 requirements.update(set(self.requirements) - self.supportedformats)
1763 1763 self._applyrequirements(requirements)
1764 1764 self._writerequirements()
1765 1765
1766 1766 if rbranchmap:
1767 1767 rbheads = []
1768 1768 for bheads in rbranchmap.itervalues():
1769 1769 rbheads.extend(bheads)
1770 1770
1771 1771 if rbheads:
1772 1772 rtiprev = max((int(self.changelog.rev(node))
1773 1773 for node in rbheads))
1774 1774 cache = branchmap.branchcache(rbranchmap,
1775 1775 self[rtiprev].node(),
1776 1776 rtiprev)
1777 1777 # Try to stick it as low as possible;
1778 1778 # filters above 'served' are unlikely to be fetched from a clone
1779 1779 for candidate in ('base', 'immutable', 'served'):
1780 1780 rview = self.filtered(candidate)
1781 1781 if cache.validfor(rview):
1782 1782 self._branchcaches[candidate] = cache
1783 1783 cache.write(rview)
1784 1784 break
1785 1785 self.invalidate()
1786 1786 return len(self.heads()) + 1
1787 1787 finally:
1788 1788 lock.release()
1789 1789
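# Wire-format illustration (editorial) for the stream_in() parser above. The
# remote's stream_out response, as consumed by this method, looks roughly
# like the following (the sizes shown are made-up values; only the shape of
# the protocol is taken from the parsing code above):
#
#   0\n                        status line: 0 = ok, 1 = forbidden, 2 = lock failed
#   1024 5242880\n             <total files> <total bytes>
#   data/foo.i\x00123\n        one "<name>\0<size>" header per file ...
#   <123 bytes of raw file data>
#   ...                        repeated for each of the 1024 files
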
1790 1790 def clone(self, remote, heads=[], stream=False):
1791 1791 '''clone remote repository.
1792 1792
1793 1793 keyword arguments:
1794 1794 heads: list of revs to clone (forces use of pull)
1795 1795 stream: use streaming clone if possible'''
1796 1796
1797 1797 # now, all clients that can request uncompressed clones can
1798 1798 # read repo formats supported by all servers that can serve
1799 1799 # them.
1800 1800
1801 1801 # if revlog format changes, client will have to check version
1802 1802 # and format flags on "stream" capability, and use
1803 1803 # uncompressed only if compatible.
1804 1804
1805 1805 if not stream:
1806 1806 # if the server explicitly prefers to stream (for fast LANs)
1807 1807 stream = remote.capable('stream-preferred')
1808 1808
1809 1809 if stream and not heads:
1810 1810 # 'stream' means remote revlog format is revlogv1 only
1811 1811 if remote.capable('stream'):
1812 1812 return self.stream_in(remote, set(('revlogv1',)))
1813 1813 # otherwise, 'streamreqs' contains the remote revlog format
1814 1814 streamreqs = remote.capable('streamreqs')
1815 1815 if streamreqs:
1816 1816 streamreqs = set(streamreqs.split(','))
1817 1817 # if we support it, stream in and adjust our requirements
1818 1818 if not streamreqs - self.supportedformats:
1819 1819 return self.stream_in(remote, streamreqs)
1820 1820 return self.pull(remote, heads)
1821 1821
1822 1822 def pushkey(self, namespace, key, old, new):
1823 1823 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1824 1824 old=old, new=new)
1825 1825 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1826 1826 ret = pushkey.push(self, namespace, key, old, new)
1827 1827 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1828 1828 ret=ret)
1829 1829 return ret
1830 1830
1831 1831 def listkeys(self, namespace):
1832 1832 self.hook('prelistkeys', throw=True, namespace=namespace)
1833 1833 self.ui.debug('listing keys for "%s"\n' % namespace)
1834 1834 values = pushkey.list(self, namespace)
1835 1835 self.hook('listkeys', namespace=namespace, values=values)
1836 1836 return values
1837 1837
1838 1838 def debugwireargs(self, one, two, three=None, four=None, five=None):
1839 1839 '''used to test argument passing over the wire'''
1840 1840 return "%s %s %s %s %s" % (one, two, three, four, five)
1841 1841
1842 1842 def savecommitmessage(self, text):
1843 1843 fp = self.opener('last-message.txt', 'wb')
1844 1844 try:
1845 1845 fp.write(text)
1846 1846 finally:
1847 1847 fp.close()
1848 1848 return self.pathto(fp.name[len(self.root) + 1:])
1849 1849
1850 1850 # used to avoid circular references so destructors work
1851 1851 def aftertrans(files):
1852 1852 renamefiles = [tuple(t) for t in files]
1853 1853 def a():
1854 1854 for vfs, src, dest in renamefiles:
1855 1855 try:
1856 1856 vfs.rename(src, dest)
1857 1857 except OSError: # journal file does not yet exist
1858 1858 pass
1859 1859 return a
1860 1860
1861 1861 def undoname(fn):
1862 1862 base, name = os.path.split(fn)
1863 1863 assert name.startswith('journal')
1864 1864 return os.path.join(base, name.replace('journal', 'undo', 1))
1865 1865
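# Worked example (editorial) for undoname() above: only the first 'journal'
# component of the basename is rewritten.
#
#   undoname('.hg/store/journal')            -> '.hg/store/undo'
#   undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
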
1866 1866 def instance(ui, path, create):
1867 1867 return localrepository(ui, util.urllocalpath(path), create)
1868 1868
1869 1869 def islocal(path):
1870 1870 return True