exchange: move stream clone logic into pull code path...
Gregory Szorc
r26449:89b7a788 default
@@ -1,1477 +1,1481
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
from node import hex, nullid
import errno, urllib
import util, scmutil, changegroup, base85, error
import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
import lock as lockmod
+import streamclone
import tags

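# Illustrative sketch (hypothetical helper, not part of this change): the
# streamclone import added above backs the new
# pulloperation.streamclonerequested attribute defined later in this file,
# letting a streaming clone be requested through the regular pull code path.
def _streamclonepullexample(repo, remote):
    # pull() is defined later in exchange.py; the keyword argument is
    # assumed to be accepted at this revision.
    return pull(repo, remote, streamclonerequested=True)
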
def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))

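# Illustrative sketch (not part of exchange.py): readbundle() dispatches on a
# 4-byte header of 2 bytes magic plus 2 bytes version -- 'HG10' followed by a
# 2-byte compression code ('BZ', 'GZ' or 'UN') for changegroup-1 bundles,
# 'HG2x' for bundle2. The file name below is hypothetical.
def _readbundleexample(ui):
    fh = open('changesets.hg', 'rb')
    # returns a changegroup.cg1unpacker or a bundle2 unbundler
    return readbundle(ui, fh, 'changesets.hg')
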
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    if markers:
        remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
        version = obsolete.commonversion(remoteversions)
        if version is None:
            raise ValueError('bundler does not support common obsmarker '
                             'format')
        stream = obsolete.encodemarkers(markers, True, version=version)
        return bundler.newpart('obsmarkers', data=stream)
    return None

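# Illustrative sketch (assumed usage, not upstream code): buildobsmarkerspart()
# raises ValueError when the remote advertises no obsmarker format we can
# encode, so a caller that treats that as non-fatal could guard it like this.
def _obsmarkerspartexample(bundler, markers):
    try:
        # None when markers is empty, the new part otherwise
        return buildobsmarkerspart(bundler, markers)
    except ValueError:
        # no common obsmarker format with the remote; skip the part
        return None
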
def _canusebundle2(op):
    """return true if a pull/push can use bundle2

    Feel free to nuke this function when we drop the experimental option"""
    return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
            and op.remote.capable('bundle2'))


class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phase changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phase changes that must be pushed if the changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no targets to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

# mapping of messages used when pushing bookmarks
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }


def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop

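# Illustrative sketch (hypothetical caller): interpreting the integer result
# documented in push()'s docstring. push() returns the pushoperation, whose
# cgresult carries the changegroup outcome.
def _pushexample(repo, remote):
    pushop = push(repo, remote, newbranch=True)
    if pushop.cgresult is None:
        repo.ui.status('nothing to push\n')
    elif pushop.cgresult == 0:
        repo.ui.warn('push failed\n')
    return pushop.cgresult
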
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for functions performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a
    step from an extension, change the pushdiscoverymapping dictionary
    directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec

def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

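# Illustrative sketch (hypothetical extension code): new discovery steps
# register through the decorator; wrapping an existing step goes through
# pushdiscoverymapping directly, as the docstring above prescribes.
@pushdiscovery('examplestep')
def _pushdiscoveryexamplestep(pushop):
    pushop.ui.debug('example discovery step\n')

def _wrapdiscoverystep(stepname):
    orig = pushdiscoverymapping[stepname]
    def wrapped(pushop):
        pushop.ui.debug('before %s discovery\n' % stepname)
        return orig(pushop)
    pushdiscoverymapping[stepname] = wrapped
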
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both the success and failure cases of the changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changesets are to be pushed
        # - and the remote is publishing
        # We may be in issue 3871 case!
        # We skip the courtesy phase synchronisation that could otherwise
        # publish changesets that are still draft locally.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that the revset breaks if droots are not strictly
    # XXX roots; we may want to ensure they are, but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # add changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation, that can be quite expensive on big repos.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search for added bookmarks
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmarks
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmarks to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for 80 char limit reasons
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are pushing and there is at least one obsolete or
            # unstable changeset in missing, at least one of the missing
            # heads will be obsolete or unstable. So checking heads only
            # is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)

    # internal config: bookmarks.pushing
    newbm = pushop.ui.configlist('bookmarks', 'pushing')
    discovery.checkheads(unfi, pushop.remote, outgoing,
                         pushop.remoteheads,
                         pushop.newbranch,
                         bool(pushop.incoming),
                         newbm)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a
    step from an extension, change the b2partsgenmapping dictionary
    directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec

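# Illustrative sketch (hypothetical part generator): generators receive
# (pushop, bundler), may add parts, and may return a callable that is later
# invoked with the bundle2 reply. The step and part names are made up.
@b2partsgenerator('example')
def _pushb2example(pushop, bundler):
    if 'example' in pushop.stepsdone:
        return
    pushop.stepsdone.add('example')
    part = bundler.newpart('x-example', data='payload')
    def handlereply(op):
        # op.records collects replies keyed by part id
        pushop.ui.debug('example replies: %r\n'
                        % op.records.getreplies(part.id))
    return handlereply
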
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    if not pushop.force:
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply

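# Editorial restatement (sketch, hypothetical helper) of the negotiation
# above: pick the highest changegroup version the server advertises that we
# can also generate.
def _pickcgversionexample(b2caps):
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        return None     # caller falls back to the version '01' path
    common = [v for v in cgversions if v in changegroup.packermap]
    if not common:
        raise ValueError(_('no common changegroup version'))
    return max(common)
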
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply


def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(stream, ['force'], 'push')
        except error.BundleValueError as exc:
            raise util.Abort('missing support for %s' % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise util.Abort('missing support for %s' % exc)
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. Once revs are transferred, if the
        # server finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in issue 3871 case!
        # We skip the courtesy phase synchronisation that could otherwise
        # publish changesets that are still draft locally.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public-only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

798 def _pushobsolete(pushop):
799 def _pushobsolete(pushop):
799 """utility function to push obsolete markers to a remote"""
800 """utility function to push obsolete markers to a remote"""
800 if 'obsmarkers' in pushop.stepsdone:
801 if 'obsmarkers' in pushop.stepsdone:
801 return
802 return
802 repo = pushop.repo
803 repo = pushop.repo
803 remote = pushop.remote
804 remote = pushop.remote
804 pushop.stepsdone.add('obsmarkers')
805 pushop.stepsdone.add('obsmarkers')
805 if pushop.outobsmarkers:
806 if pushop.outobsmarkers:
806 pushop.ui.debug('try to push obsolete markers to remote\n')
807 pushop.ui.debug('try to push obsolete markers to remote\n')
807 rslts = []
808 rslts = []
808 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
809 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
809 for key in sorted(remotedata, reverse=True):
810 for key in sorted(remotedata, reverse=True):
810 # reverse sort to ensure we end with dump0
811 # reverse sort to ensure we end with dump0
811 data = remotedata[key]
812 data = remotedata[key]
812 rslts.append(remote.pushkey('obsolete', key, '', data))
813 rslts.append(remote.pushkey('obsolete', key, '', data))
813 if [r for r in rslts if not r]:
814 if [r for r in rslts if not r]:
814 msg = _('failed to push some obsolete markers!\n')
815 msg = _('failed to push some obsolete markers!\n')
815 repo.ui.warn(msg)
816 repo.ui.warn(msg)
816
817
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

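The old/new pair handed to pushkey encodes the bookmark action: an empty old
value means the bookmark is new on the remote ('export'), an empty new value
means it is being removed ('delete'), and anything else is a move ('update').
A standalone sketch of that mapping (the helper name is invented, not part of
this changeset):

# Hypothetical helper mirroring _pushbookmark's action selection.
def classifybookmark(old, new):
    # old/new are hex node strings; '' means absent on that side
    if not old:
        return 'export'
    if not new:
        return 'delete'
    return 'update'

assert classifybookmark('', 'abc123') == 'export'
assert classifybookmark('abc123', '') == 'delete'
assert classifybookmark('abc123', 'def456') == 'update'
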
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revisions we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()

878 def pulledsubset(self):
879 def pulledsubset(self):
879 """heads of the set of changeset target by the pull"""
880 """heads of the set of changeset target by the pull"""
880 # compute target subset
881 # compute target subset
881 if self.heads is None:
882 if self.heads is None:
882 # We pulled every thing possible
883 # We pulled every thing possible
883 # sync on everything common
884 # sync on everything common
884 c = set(self.common)
885 c = set(self.common)
885 ret = list(self.common)
886 ret = list(self.common)
886 for n in self.rheads:
887 for n in self.rheads:
887 if n not in c:
888 if n not in c:
888 ret.append(n)
889 ret.append(n)
889 return ret
890 return ret
890 else:
891 else:
891 # We pulled a specific subset
892 # We pulled a specific subset
892 # sync on this subset
893 # sync on this subset
893 return self.heads
894 return self.heads
894
895
    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

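In the "pull everything" branch, pulledsubset is simply common plus any remote
head not already in it; otherwise it is the explicitly requested heads. A
standalone model of that union logic (plain strings stand in for binary
changelog nodes):

def pulledsubset(common, rheads, heads=None):
    # mirrors pulloperation.pulledsubset outside the class, for illustration
    if heads is None:
        seen = set(common)
        ret = list(common)
        for n in rheads:
            if n not in seen:
                ret.append(n)
        return ret
    return heads

assert pulledsubset(['a', 'b'], ['b', 'c']) == ['a', 'b', 'c']
assert pulledsubset(['a'], ['b'], heads=['x']) == ['x']
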
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

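The manager is lazy: no transaction exists until transaction() is first
called, so a pull that transfers nothing never opens one. A usage sketch of
the close-on-success, release-always pattern, assuming the hg-internal API of
this era (the repository path and URL are hypothetical):

from mercurial import ui as uimod, hg, exchange

repo = hg.repository(uimod.ui(), '/path/to/local/repo')
trmanager = exchange.transactionmanager(repo, 'pull', 'http://example.com/r')
try:
    tr = trmanager.transaction()  # opened on first use only
    # ... write data under tr ...
    trmanager.close()             # commits, if a transaction was opened
finally:
    trmanager.release()           # aborts it if close() was never reached
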
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
+        streamclone.maybeperformstreamclone(pullop.repo, pullop.remote,
+                                            pullop.heads,
+                                            pullop.streamclonerequested)
        _pulldiscovery(pullop)
        if _canusebundle2(pullop):
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        pullop.trmanager.release()
        lock.release()

    return pullop

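A minimal sketch of driving this entry point, assuming the hg-internal API of
this era (the path and URL are hypothetical; the repo must already exist, and
be empty if a stream clone is to be attempted):

from mercurial import ui as uimod, hg, exchange

ui = uimod.ui()
repo = hg.repository(ui, '/path/to/local/repo')
remote = hg.peer(ui, {}, 'http://example.com/repo')
# heads=None pulls everything; streamclonerequested=None defers to the
# server's stream preference, True forces the raw revlog copy
pullop = exchange.pull(repo, remote, heads=None, streamclonerequested=None)
if pullop.cgresult == 0:
    ui.status('no changesets were added\n')
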
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for functions performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pulldiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

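An extension registers a new discovery step by decorating a function that
receives the pulloperation; a sketch with an invented step name and body (not
part of this changeset):

@pulldiscovery('example:remotenamespaces')
def _pulldiscoveryremotenamespaces(pullop):
    # invented step: stash extra remote state on the operation for later use
    pullop.remotenamespaces = pullop.remote.listkeys('namespaces')
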
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if (_canusebundle2(pullop)
        and 'listkeys' in bundle2.bundle2caps(pullop.remote)):
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, let's drop it from the
        # unknown remote heads and put it back in common.
        #
        # This is a hackish solution to catch most "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological number of round
        # trips for a huge number of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in remotecaps:
        kwargs['listkeys'] = ['phase']
        if pullop.remotebookmarks is None:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(remotecaps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""
    pass

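Since this is an empty hook, an extension typically wraps it with
extensions.wrapfunction to smuggle extra arguments into the getbundle call; a
sketch (the argument name is invented, and a real extension would also need
server-side support for it):

from mercurial import exchange, extensions

def _extraprepare(orig, pullop, kwargs):
    # invented bundle2 argument, for illustration only
    kwargs['example_widgets'] = True
    return orig(pullop, kwargs)

def uisetup(ui):
    extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
                            _extraprepare)
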
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing, which would break future useful rollback
    # calls.
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())

def _pullphase(pullop):
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `gettransaction` is a function that returns the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code
    that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes."""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr

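As the docstring notes, this function exists partly so experiments can replace
it. A sketch of an extension swapping in its own behaviour via wrapfunction
(the no-op experiment shown is invented, not part of this changeset):

from mercurial import exchange, extensions

def _pullobsoletenoop(orig, pullop):
    # invented experiment: mark the step done so the pushkey-based
    # marker exchange is skipped entirely
    pullop.stepsdone.add('obsmarkers')
    return orig(pullop)

def uisetup(ui):
    extensions.wrapfunction(exchange, '_pullobsolete', _pullobsoletenoop)
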
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = set(['HG20'])
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urllib.quote(capsblob))
    return caps

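For a typical repository this yields a two-element set: the literal 'HG20'
marker plus a URL-quoted blob describing the local bundle2 capabilities,
roughly as below (the blob shown is illustrative, not a literal value):

caps = caps20to10(repo)
# e.g. set(['HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02%0A...'])
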
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

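Registering a part generator mirrors the pulldiscovery decorator above; a
sketch that adds an invented advisory part carrying the server time (the part
name is made up, and real clients would need a matching handler):

import time

@getbundle2partsgenerator('example:servertime')
def _getbundleservertimepart(bundler, repo, source, bundlecaps=None,
                             b2caps=None, **kwargs):
    # mandatory=False makes the part advisory, so unaware clients skip it
    bundler.newpart('example:servertime', data=str(time.time()),
                    mandatory=False)
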
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kinds of parts)

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types become available for bundle2.

    This is different from changegroup.getchangegroup that only returns an
    HG10 changegroup bundle. They may eventually get reunited in the future
    when we have a clearer idea of the API we want to use to query different
    data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # bundle10 case
    usebundle2 = False
    if bundlecaps is not None:
        usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        getcgkwargs = {}
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = getcgkwargs['version'] = max(cgversions)
        outgoing = changegroup.computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                **getcgkwargs)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset nodes and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = changegroup.computeoutgoing(repo, heads, common)

    if not outgoing.missingheads:
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())
    chunks = []

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) b) we don't want to send the
    # part if we don't have entries and knowing if we have entries requires
    # cache lookups.
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is not None:
            chunks.extend([node, fnode])

    if chunks:
        bundler.newpart('hgtagsfnodes', data=''.join(chunks))

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

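The ['hashed', ...] form lets a client pin the remote state it saw during
discovery without shipping every head: it sends the SHA-1 of the sorted,
concatenated binary head nodes. A sketch of computing that value on the
sending side (the two 20-byte nodes are invented):

heads_seen = ['\x11' * 20, '\x22' * 20]
their_heads = ['hashed', util.sha1(''.join(sorted(heads_seen))).digest()]
# passing their_heads to unbundle() makes check_heads() raise PushRaced
# if the remote gained or lost a head since discovery
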
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    wlock = lock = tr = None
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            r = None
            try:
                wlock = repo.wlock()
                lock = repo.lock()
                tr = repo.transaction(source)
                tr.hookargs['source'] = source
                tr.hookargs['url'] = url
                tr.hookargs['bundle2'] = '1'
                op = bundle2.bundleoperation(repo, lambda: tr,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                tr.close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            lock = repo.lock()
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        lockmod.release(tr, lock, wlock)
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
@@ -1,1874 +1,1871
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, wdirrev, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect, random
import branchmap, pathutil
import namespaces
-import streamclone
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on an unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

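    # Illustrative caller sketch (hedged; 'bundlepath' is hypothetical):
    # feeding a bundle file through the local peer, roughly what an
    # unbundle-style command does.
    #
    #   fh = open(bundlepath, 'rb')
    #   ret = peer.unbundle(fh, ['force'], 'bundle:' + bundlepath)
    #   # for bundle2, 'ret' is an unbundler carrying the reply parts;
    #   # otherwise it is an integer result code.
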
    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # A list of (ui, featureset) functions. Only functions defined in the
    # module of an enabled extension are invoked.
    featuresetupfuncs = set()

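    # Illustrative extension hook-up (assumed extension code; 'myfeature'
    # is a hypothetical requirement name): a featuresetup function extends
    # the set of requirements this repo class will accept.
    #
    #   def featuresetup(ui, supported):
    #       supported.add('myfeature')
    #
    #   def uisetup(ui):
    #       localrepo.localrepository.featuresetupfuncs.add(featuresetup)
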
    def _baserequirements(self, create):
        return ['revlogv1']

    def __init__(self, baseui, path=None, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                self.requirements.update(self._baserequirements(create))
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    self.requirements.add("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        self.requirements.add("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            self.requirements.add('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                # experimental config: format.generaldelta
                if self.ui.configbool('format', 'generaldelta', False):
                    self.requirements.add("generaldelta")
                if self.ui.configbool('experimental', 'treemanifest', False):
                    self.requirements.add("treemanifest")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    self.requirements.add("manifestv2")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # Holds sets of revisions to be filtered. Should be cleared when
        # something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urllib.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas',
                                                   False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas

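    # The options read above correspond to hgrc settings; an illustrative
    # (experimental, values hypothetical) configuration:
    #
    #   [format]
    #   chunkcachesize = 65536
    #   maxchainlen = 1000
    #   manifestcachesize = 4
    #   aggressivemergedeltas = True
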
    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

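    # Walk sketch (illustrative): with subrepos 'sub' and 'sub/nested'
    # registered in the working copy, a path like <root>/sub/nested/f is
    # vetted by delegating 'nested/f' to the 'sub' subrepo recursively;
    # paths under no subrepo prefix fall through and return False.
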
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

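    # Illustrative use of the view machinery above; filter names are
    # defined by the repoview module:
    #
    #   served = repo.filtered('served')    # hides secret/hidden csets
    #   visible = repo.filtered('visible')  # hides only hidden csets
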
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _activebookmark(self):
        return bookmarks.readactive(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

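    # Illustrative: divergent bookmarks share the part before '@', so with
    # 'feature', 'feature@alice' and 'feature@bob' all set,
    # bookmarkheads('feature') returns the nodes of all three marks.
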
    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    def dirlog(self, dir):
        return self.manifest.dirlog(dir)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

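    # Illustrative lookups backed by the dunder methods above:
    #
    #   repo[None]          # working directory context
    #   repo['tip']         # changectx via symbolic name
    #   repo[0:5]           # list of changectxs, filtered revs skipped
    #   'unknown' in repo   # False rather than raising a lookup error
    #   len(repo)           # number of changesets in the changelog
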
    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

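    # Illustrative queries; revset.formatspec quotes the arguments:
    #
    #   repo.revs('ancestors(%d)', 42)          # revision numbers
    #   for ctx in repo.set('branch(%s)', 'default'):
    #       pass                                # changectx objects
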
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                        name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be
        a string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

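    # Illustrative call creating a committed (global) tag; 'n' is assumed
    # to be a binary node, e.g. repo['.'].node():
    #
    #   repo.tag('v1.0', n, 'Added tag v1.0', local=False, user=None,
    #            date=None)
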
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

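    # Illustrative: known() backs discovery, and secret changesets are
    # deliberately reported as unknown:
    #
    #   repo.known([repo['tip'].node(), '\x01' * 20])
    #   # -> [True, False] when tip is not secret
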
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

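    # The patterns come from the [encode]/[decode] hgrc sections; an
    # illustrative configuration using shell command filters (example
    # adapted from the hgrc documentation):
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #   [decode]
    #   *.gz = pipe: gzip
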
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

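    # Illustrative extension usage (names hypothetical): an extension can
    # register a named data filter and reference it from [encode]/[decode]
    # patterns such as '** = myencode:':
    #
    #   def reposetup(ui, repo):
    #       if repo.local():
    #           repo.adddatafilter('myencode:', myencodefn)
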
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

964 def transaction(self, desc, report=None):
    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # make journal.dirstate contain in-memory changes at this point
        self.dirstate.write()

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc, **tr.hookargs)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

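    # A minimal usage sketch (illustrative, not part of the file): callers
    # below, e.g. commitctx(), drive a transaction through exactly this
    # close/release protocol, assuming the store lock is already held:
    #
    #     tr = repo.transaction('my-operation')
    #     try:
    #         ...                # write revlog data through tr
    #         tr.close()         # fires 'pretxnclose', then the finalizers
    #     finally:
    #         tr.release()       # aborts and fires 'txnabort' if not closed
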
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

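    # Naming convention note: every journal file written above is renamed to
    # its "undo" counterpart by aftertrans()/undoname() (defined at the end
    # of this file) once the transaction succeeds, e.g.:
    #
    #     undoname('journal.dirstate')    # -> 'undo.dirstate'
    #     undoname('journal.phaseroots')  # -> 'undo.phaseroots'
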
    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

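    # These two methods back 'hg recover' and 'hg rollback'. An illustrative
    # dry-run sketch, assuming `repo` is an open localrepository:
    #
    #     if repo.rollback(dryrun=True) == 0:  # report what would be undone
    #         repo.rollback()                  # undo the last transaction
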
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            ms = mergemod.mergestate(self)
            ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              parentenvvar=None):
        parentlock = None
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

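    # Illustrative sketch: _afterlock() is how work that must observe the
    # fully unlocked repository is deferred; commit() and pushkey() below use
    # it for their final hook runs. A caller holding the locks can queue a
    # callback the same way:
    #
    #     def notify():
    #         repo.ui.status('all locks released\n')
    #     repo._afterlock(notify)  # runs immediately if no lock is held
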
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisitions would not cause a dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

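    # A sketch of the documented acquisition order ('wlock' strictly before
    # 'lock'), as used by rollback() above; the reverse order triggers the
    # develwarn and risks deadlock against other writers:
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         ...                   # modify working copy and store
    #     finally:
    #         release(lock, wlock)  # release() comes from the lock module
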
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

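    # The net effect of the copy handling above, for a commit after
    # "hg mv foo bar" (values illustrative): the new filelog revision of
    # 'bar' is stored with
    #
    #     meta = {'copy': 'foo',         # path copied from
    #             'copyrev': hex(crev)}  # filelog node of the source
    #
    # and fparent1 set to nullid, which tells readers to consult the copy
    # metadata instead of the usual parent chain.
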
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may already have been stripped before the
            # hook is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

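    # Illustrative usage sketch, assuming `repo` is a localrepository with
    # pending working-directory changes:
    #
    #     node = repo.commit(text='fix parser',
    #                        user='Jane Doe <jane@example.com>')
    #     if node is None:  # nothing to commit and empty commits not allowed
    #         repo.ui.status('nothing changed\n')
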
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent changeset.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

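    # between() walks first parents from each 'top' toward 'bottom' and keeps
    # only the nodes at exponentially growing distances (1, 2, 4, 8, ...),
    # because `f` doubles every time the step counter `i` catches up with it.
    # A toy model of the sampling, detached from any repository:
    #
    #     samples, f = [], 1
    #     for i in range(1, 20):
    #         if i == f:
    #             samples.append(i)
    #             f *= 2
    #     assert samples == [1, 2, 4, 8, 16]
    #
    # This spacing keeps the reply small for the legacy 'between' wire
    # protocol command used by old discovery.
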
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
        streamclone.maybeperformstreamclone(self, remote, heads, stream)

        # internal config: ui.quietbookmarkmove
        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            pullop = exchange.pull(self, remote, heads,
                                   streamclonerequested=stream)
            return pullop.cgresult
        finally:
            self.ui.restoreconfig(quiet)

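    # This is the method touched by this changeset: clone() first lets
    # streamclone.maybeperformstreamclone() attempt a raw store-file copy,
    # then falls through to a normal pull, passing streamclonerequested so
    # the pull path knows a stream clone was asked for. Illustrative call,
    # assuming `peer` came from hg.peer(ui, {}, url):
    #
    #     repo.clone(peer, stream=True)  # prefer streaming when the server
    #                                    # supports it; else a regular pull
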
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
                pending = lambda: tr.writepending() and self.root or ""
                hookargs['pending'] = pending
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

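    # Sketch of the pushkey semantics implemented above: keys live in
    # namespaces ('bookmarks', 'phases', ...) and an update is a
    # compare-and-swap against the old value. Moving a bookmark, with the
    # hex node values illustrative:
    #
    #     ok = repo.pushkey('bookmarks', 'feature-x', oldhex, newhex)
    #     if not ok:  # a prepushkey hook aborted or the swap failed
    #         repo.ui.warn('bookmark update refused\n')
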
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True