getbundle: add a ``cg`` boolean argument to control changegroup inclusion...
Pierre-Yves David
r21989:bdb6d97f default
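The change below teaches exchange.getbundle to skip building the changegroup
part when the caller passes ``cg=False``, so a bundle2 (HG2X) bundle can carry
other parts (e.g. listkeys) alone; requesting a bundle10 without a changegroup
is rejected. A minimal caller-side sketch of the resulting behaviour (the
``repo``, ``heads`` and ``common`` values are assumed to come from the usual
discovery step):

    from mercurial import exchange

    # Default: 'cg' is absent (treated as True), so the changegroup is
    # built and included, exactly as before this change.
    bundle = exchange.getbundle(repo, 'pull', heads=heads, common=common)

    # With cg=False and an HG2X-capable request, the returned bundle2
    # stream omits the changegroup part and may carry only other parts,
    # here a b2x:listkeys part for phases.
    bundle = exchange.getbundle(repo, 'pull', bundlecaps=set(['HG2X']),
                                cg=False, listkeys=['phases'])

    # With cg=False but no HG2X capability, a plain HG10 bundle cannot
    # omit its changegroup, so getbundle raises ValueError.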
@@ -1,812 +1,816
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
from node import hex, nullid
import errno, urllib
import util, scmutil, changegroup, base85, error
import discovery, phases, obsolete, bookmarks, bundle2, pushkey

def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, alg)
    elif version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))


class pushoperation(object):
    """An object that represents a single push operation

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # set of all heads common after changeset bundle push
        self.commonheads = None

def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                          False)
               and pushop.remote.capable('bundle2-exp')):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushcomputecommonheads(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    _pushbookmark(pushop)
    return pushop.ret

def _pushdiscovery(pushop):
    # discovery
    unfi = pushop.repo.unfiltered()
    fci = discovery.findcommonincoming
    commoninc = fci(unfi, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are defined here to respect the 80-char limit
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for the i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are to push and there is at least one obsolete or
            # unstable changeset in missing, at least one of the missing
            # heads will be obsolete or unstable. So checking heads only
            # is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True

def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.ret`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # Send known heads to the server for race detection.
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.ret = cgreplies['changegroup'][0]['return']
    return handlereply

# list of functions that may decide to add parts to an outgoing bundle2
bundle2partsgenerators = [_pushb2ctx]

def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # create reply capability
    capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    for partgen in bundle2partsgenerators:
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        op = bundle2.processbundle(pushop.repo, reply)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())

def _pushcomputecommonheads(pushop):
    unfi = pushop.repo.unfiltered()
    if pushop.ret:
        # push succeeded, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # All-out push failed. synchronize all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    unfi = pushop.repo.unfiltered()
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # Get the list of all revs draft on remote but public here.
        # XXX Beware that the revset breaks if droots is not strictly
        # XXX roots; we may want to ensure it is, but it is costly
        outdated = unfi.set('heads((%ln::%ln) and public())',
                            droots, cheads)

        b2caps = bundle2.bundle2caps(pushop.remote)
        if 'b2x:pushkey' in b2caps:
            # server supports bundle2, let's do a batched push through it
            #
            # This will eventually be unified with the changesets bundle2 push
            bundler = bundle2.bundle20(pushop.ui, b2caps)
            capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
            bundler.newpart('b2x:replycaps', data=capsblob)
            part2node = []
            enc = pushkey.encode
            for newremotehead in outdated:
                part = bundler.newpart('b2x:pushkey')
                part.addparam('namespace', enc('phases'))
                part.addparam('key', enc(newremotehead.hex()))
                part.addparam('old', enc(str(phases.draft)))
                part.addparam('new', enc(str(phases.public)))
                part2node.append((part.id, newremotehead))
            stream = util.chunkbuffer(bundler.getchunks())
            try:
                reply = pushop.remote.unbundle(stream, ['force'], 'push')
                op = bundle2.processbundle(pushop.repo, reply)
            except error.BundleValueError, exc:
                raise util.Abort('missing support for %s' % exc)
            for partid, node in part2node:
                partrep = op.records.getreplies(partid)
                results = partrep['pushkey']
                assert len(results) <= 1
                msg = None
                if not results:
                    msg = _('server ignored update of %s to public!\n') % node
                elif not int(results[0]['return']):
                    msg = _('updating %s to public failed!\n') % node
                if msg is not None:
                    pushop.ui.warn(msg)

        else:
            # fallback to independent pushkey command
            for newremotehead in outdated:
                r = pushop.remote.pushkey('phases',
                                          newremotehead.hex(),
                                          str(phases.draft),
                                          str(phases.public))
                if not r:
                    pushop.ui.warn(_('updating %s to public failed!\n')
                                   % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    if (obsolete._enabled and repo.obsstore and
        'obsolete' in remote.listkeys('namespaces')):
        rslts = []
        remotedata = repo.listkeys('obsolete')
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
     ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                           srchex=hex)

    for b, scid, dcid in advsrc:
        if ancestors and repo[scid].rev() not in ancestors:
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)

class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name of the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps remaining to do (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

def pull(repo, remote, heads=None, force=False):
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
            and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult

def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will handle all discovery
    at some point."""
    tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    pullop.common, pullop.fetch, pullop.rheads = tmp

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data is changegroup."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.todosteps.remove('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    if 'b2x:listkeys' in remotecaps:
        kwargs['listkeys'] = ['phase']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    _pullbundle2extraprepare(pullop, kwargs)
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        assert len(op.records['changegroup']) == 1
        pullop.cgresult = op.records['changegroup'][0]['return']

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""
    pass

def _pullchangeset(pullop):
    """pull changesets from the remote into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open a transaction for nothing, and so we don't break a
    # future useful rollback call
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())

def _pullphase(pullop):
    # Get remote phases data from remote
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    pullop.todosteps.remove('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        phases.advanceboundary(pullop.repo, phases.public, pheads)
        phases.advanceboundary(pullop.repo, phases.draft,
                               pullop.pulledsubset)
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        phases.advanceboundary(pullop.repo, phases.public,
                               pullop.pulledsubset)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` function returns the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code
    that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr

def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = set(['HG2X'])
    capsblob = bundle2.encodecaps(repo.bundle2caps)
    caps.add('bundle2=' + urllib.quote(capsblob))
    return caps

def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kinds of parts)

    Could be a bundle HG10 or a bundle HG2X depending on the bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types become available for bundle2.

    This is different from changegroup.getbundle, which only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we want to use to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
-    # build changegroup bundle here.
-    cg = changegroup.getbundle(repo, source, heads=heads,
-                               common=common, bundlecaps=bundlecaps)
+    cg = None
+    if kwargs.get('cg', True):
+        # build changegroup bundle here.
+        cg = changegroup.getbundle(repo, source, heads=heads,
+                                   common=common, bundlecaps=bundlecaps)
+    elif 'HG2X' not in bundlecaps:
+        raise ValueError(_('request for bundle10 must include changegroup'))
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    if cg:
        bundler.newpart('b2x:changegroup', data=cg.getchunks())
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
    _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
                        bundlecaps=bundlecaps, **kwargs)
    return util.chunkbuffer(bundler.getchunks())

def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
                        bundlecaps=None, **kwargs):
    """hook function to let extensions add parts to the requested bundle"""
    pass

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                cl = repo.unfiltered().changelog
                p = cl.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True, source=source,
                          url=url, pending=p, **tr.hookargs)
                tr.close()
                repo.hook('b2x-transactionclose', source=source, url=url,
                          **tr.hookargs)
            except Exception, exc:
                exc.duringunbundle2 = True
                raise
        else:
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
@@ -1,1781 +1,1783
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorator for methods that always need to run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2X' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

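    # Sketch of the round trip described in the comment above (assumption:
    # an existing repository at path '.'): with 'HG2X' in bundlecaps the
    # caller gets a bundle2.unbundle20 object rather than a raw stream.
    #
    #     from mercurial import hg, ui as uimod
    #     repo = hg.repository(uimod.ui(), '.')
    #     bundle = repo.peer().getbundle('pull', bundlecaps=set(['HG2X']))
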
    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    bundle2caps = {'HG2X': (),
                   'b2x:listkeys': (),
-                  'b2x:pushkey': ()}
+                  'b2x:pushkey': (),
+                  'b2x:changegroup': (),
+                  }

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

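    # Sketch of how an extension might register with featuresetupfuncs
    # (assumption: the names below are invented for the example); the hook
    # lets an extension extend self.supported before requirements are read:
    #
    #     def featuresetup(ui, supported):
    #         supported |= set(['x-example-feature'])
    #
    #     def uisetup(ui):
    #         localrepo.localrepository.featuresetupfuncs.add(featuresetup)
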
    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(self.bundle2caps)
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

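    # The capability above is only advertised when the experimental knob is
    # set; a minimal hgrc sketch to opt in:
    #
    #     [experimental]
    #     bundle2-exp = True
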
    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

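    # Hypothetical caller's view of revs()/set() above: formatspec quotes
    # the arguments, so user-provided values can be interpolated safely.
    #
    #     for ctx in repo.set('branch(%s) and head()', 'default'):
    #         print ctx.hex()
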
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

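    # The 'encode'/'decode' names above map to hgrc sections of the same
    # name; an example rule along the lines of the hgrc documentation,
    # decompressing gzip files on checkin via the built-in pipe: filter:
    #
    #     [encode]
    #     *.gz = pipe: gunzip
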
    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

860 def transaction(self, desc, report=None):
862 def transaction(self, desc, report=None):
861 tr = self._transref and self._transref() or None
863 tr = self._transref and self._transref() or None
862 if tr and tr.running():
864 if tr and tr.running():
863 return tr.nest()
865 return tr.nest()
864
866
865 # abort here if the journal already exists
867 # abort here if the journal already exists
866 if self.svfs.exists("journal"):
868 if self.svfs.exists("journal"):
867 raise error.RepoError(
869 raise error.RepoError(
868 _("abandoned transaction found"),
870 _("abandoned transaction found"),
869 hint=_("run 'hg recover' to clean up transaction"))
871 hint=_("run 'hg recover' to clean up transaction"))
870
872
871 def onclose():
873 def onclose():
872 self.store.write(self._transref())
874 self.store.write(self._transref())
873
875
874 self._writejournal(desc)
876 self._writejournal(desc)
875 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
877 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
876 rp = report and report or self.ui.warn
878 rp = report and report or self.ui.warn
877 tr = transaction.transaction(rp, self.sopener,
879 tr = transaction.transaction(rp, self.sopener,
878 "journal",
880 "journal",
879 aftertrans(renames),
881 aftertrans(renames),
880 self.store.createmode,
882 self.store.createmode,
881 onclose)
883 onclose)
882 self._transref = weakref.ref(tr)
884 self._transref = weakref.ref(tr)
883 return tr
885 return tr
884
886
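    # Hedged usage sketch, not part of the original module: callers pair
    # transaction() with close()/release(), relying on the journal written
    # above to roll back on failure (names below are illustrative):
    #
    #     tr = repo.transaction('my-operation')
    #     try:
    #         # ... append to store files through tr ...
    #         tr.close()      # success: journal.* become undo.* via aftertrans
    #     finally:
    #         tr.release()    # no-op after close(); otherwise rolls back
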
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly reread the dirstate (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

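    # Illustrative sketch (assumed caller code, not part of the original
    # module): deferring work until the store lock drops; with no lock held
    # the callback runs immediately, as the branch above shows:
    #
    #     def notify():
    #         repo.ui.status('store lock released\n')
    #     repo._afterlock(notify)
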
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in changes[:3]:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

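    # Illustrative call only, assuming `repo` is a localrepository whose
    # dirstate already tracks the modified files:
    #
    #     node = repo.commit(text='fix frobnicator', user='alice <a@b.org>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n') # see the early return above
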
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent already has a higher phase, the result will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

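    # Hedged sketch: commitctx() is the low-level path; a caller can hand it
    # a context.memctx built entirely in memory (signatures as of this era;
    # getfilectx, p1node, p2node and 'afile' are illustrative names):
    #
    #     def getfilectx(repo, memctx, path):
    #         return context.memfilectx(path, 'new contents\n')
    #     mctx = context.memctx(repo, (p1node, p2node), 'message',
    #                           ['afile'], getfilectx, user='alice')
    #     node = repo.commitctx(mctx)
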
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

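    # Illustrative ordering for history-destroying operations such as strip:
    # bracket the destructive step with the two notifications above:
    #
    #     repo.destroying()       # flush pending in-memory state first
    #     # ... truncate revlogs ...
    #     repo.destroyed()        # then repair phase/branch/tag caches
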
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

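    # Worked example (hedged): with a linear history and pairs=[(rev9, rev0)],
    # between() walks first parents from rev9 and records the nodes reached
    # after 1, 2, 4 and 8 steps -- rev8, rev7, rev5 and rev1 -- giving old
    # discovery an exponentially thinning sample of the range.
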
    def pull(self, remote, heads=None, force=False):
        return exchange.pull(self, remote, heads, force)

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def push(self, remote, force=False, revs=None, newbranch=False):
        return exchange.push(self, remote, force, revs, newbranch)

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filters above 'served' are unlikely to be fetched from
                    # a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

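    # Hedged sketch of the stream_out payload parsed above (the server side
    # lives elsewhere; field names are descriptive only):
    #
    #     <status>\n                    # 0 ok, 1 forbidden, 2 remote lock failed
    #     <total_files> <total_bytes>\n
    #     then, repeated total_files times:
    #         <store path>\0<size>\n    followed by <size> raw bytes
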
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

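    # Illustrative call only (assumed setup): given a peer from hg.peer(),
    # a full streaming clone falls out of the negotiation above:
    #
    #     other = hg.peer(repo.ui, {}, 'http://example.com/repo')
    #     repo.clone(other, stream=True)
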
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

1749 def debugwireargs(self, one, two, three=None, four=None, five=None):
1751 def debugwireargs(self, one, two, three=None, four=None, five=None):
1750 '''used to test argument passing over the wire'''
1752 '''used to test argument passing over the wire'''
1751 return "%s %s %s %s %s" % (one, two, three, four, five)
1753 return "%s %s %s %s %s" % (one, two, three, four, five)
1752
1754
1753 def savecommitmessage(self, text):
1755 def savecommitmessage(self, text):
1754 fp = self.opener('last-message.txt', 'wb')
1756 fp = self.opener('last-message.txt', 'wb')
1755 try:
1757 try:
1756 fp.write(text)
1758 fp.write(text)
1757 finally:
1759 finally:
1758 fp.close()
1760 fp.close()
1759 return self.pathto(fp.name[len(self.root) + 1:])
1761 return self.pathto(fp.name[len(self.root) + 1:])
1760
1762
1761 # used to avoid circular references so destructors work
1763 # used to avoid circular references so destructors work
1762 def aftertrans(files):
1764 def aftertrans(files):
1763 renamefiles = [tuple(t) for t in files]
1765 renamefiles = [tuple(t) for t in files]
1764 def a():
1766 def a():
1765 for vfs, src, dest in renamefiles:
1767 for vfs, src, dest in renamefiles:
1766 try:
1768 try:
1767 vfs.rename(src, dest)
1769 vfs.rename(src, dest)
1768 except OSError: # journal file does not yet exist
1770 except OSError: # journal file does not yet exist
1769 pass
1771 pass
1770 return a
1772 return a
1771
1773
1772 def undoname(fn):
1774 def undoname(fn):
1773 base, name = os.path.split(fn)
1775 base, name = os.path.split(fn)
1774 assert name.startswith('journal')
1776 assert name.startswith('journal')
1775 return os.path.join(base, name.replace('journal', 'undo', 1))
1777 return os.path.join(base, name.replace('journal', 'undo', 1))
1776
1778
1777 def instance(ui, path, create):
1779 def instance(ui, path, create):
1778 return localrepository(ui, util.urllocalpath(path), create)
1780 return localrepository(ui, util.urllocalpath(path), create)
1779
1781
1780 def islocal(path):
1782 def islocal(path):
1781 return True
1783 return True
@@ -1,867 +1,868
1 # wireproto.py - generic wire protocol support functions
2 #
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 import urllib, tempfile, os, sys
9 from i18n import _
10 from node import bin, hex
11 import changegroup as changegroupmod, bundle2, pushkey as pushkeymod
12 import peer, error, encoding, util, store, exchange
13
14
15 class abstractserverproto(object):
16 """abstract class that summarizes the protocol API
17
18 Used as reference and documentation.
19 """
20
21 def getargs(self, args):
22 """return the value for arguments in <args>
23
24 returns a list of values (same order as <args>)"""
25 raise NotImplementedError()
26
27 def getfile(self, fp):
28 """write the whole content of a file into a file like object
29
30 The file is in the form::
31
32 (<chunk-size>\n<chunk>)+0\n
33
34 chunk size is the ascii version of the int.
35 """
36 raise NotImplementedError()
37
38 def redirect(self):
39 """may set up interception for stdout and stderr
40
41 See also the `restore` method."""
42 raise NotImplementedError()
43
44 # If the `redirect` function does install interception, the `restore`
45 # function MUST be defined. If interception is not used, this function
46 # MUST NOT be defined.
47 #
48 # left commented here on purpose
49 #
50 #def restore(self):
51 # """reinstall previous stdout and stderr and return intercepted stdout
52 # """
53 # raise NotImplementedError()
54
55 def groupchunks(self, cg):
56 """return 4096-byte chunks from a changegroup object
57
58 Some protocols may have compressed the contents."""
59 raise NotImplementedError()
60
61 # abstract batching support
62
63 class future(object):
64 '''placeholder for a value to be set later'''
65 def set(self, value):
66 if util.safehasattr(self, 'value'):
67 raise error.RepoError("future is already set")
68 self.value = value
69
70 class batcher(object):
71 '''base class for batches of commands submittable in a single request
72
73 All methods invoked on instances of this class are simply queued and
74 return a future for the result. Once you call submit(), all the queued
75 calls are performed and the results set in their respective futures.
76 '''
77 def __init__(self):
78 self.calls = []
79 def __getattr__(self, name):
80 def call(*args, **opts):
81 resref = future()
82 self.calls.append((name, args, opts, resref,))
83 return resref
84 return call
85 def submit(self):
86 pass
87
88 class localbatch(batcher):
89 '''performs the queued calls directly'''
90 def __init__(self, local):
91 batcher.__init__(self)
92 self.local = local
93 def submit(self):
94 for name, args, opts, resref in self.calls:
95 resref.set(getattr(self.local, name)(*args, **opts))
96
97 class remotebatch(batcher):
98 '''batches the queued calls; uses as few roundtrips as possible'''
99 def __init__(self, remote):
100 '''remote must support _submitbatch(encbatch) and
101 _submitone(op, encargs)'''
102 batcher.__init__(self)
103 self.remote = remote
104 def submit(self):
105 req, rsp = [], []
106 for name, args, opts, resref in self.calls:
107 mtd = getattr(self.remote, name)
108 batchablefn = getattr(mtd, 'batchable', None)
109 if batchablefn is not None:
110 batchable = batchablefn(mtd.im_self, *args, **opts)
111 encargsorres, encresref = batchable.next()
112 if encresref:
113 req.append((name, encargsorres,))
114 rsp.append((batchable, encresref, resref,))
115 else:
116 resref.set(encargsorres)
117 else:
118 if req:
119 self._submitreq(req, rsp)
120 req, rsp = [], []
121 resref.set(mtd(*args, **opts))
122 if req:
123 self._submitreq(req, rsp)
124 def _submitreq(self, req, rsp):
125 encresults = self.remote._submitbatch(req)
126 for encres, r in zip(encresults, rsp):
127 batchable, encresref, resref = r
128 encresref.set(encres)
129 resref.set(batchable.next())
130
131 def batchable(f):
132 '''annotation for batchable methods
133
134 Such methods must implement a coroutine as follows:
135
136 @batchable
137 def sample(self, one, two=None):
138 # Handle locally computable results first:
139 if not one:
140 yield "a local result", None
141 # Build list of encoded arguments suitable for your wire protocol:
142 encargs = [('one', encode(one),), ('two', encode(two),)]
143 # Create future for injection of encoded result:
144 encresref = future()
145 # Return encoded arguments and future:
146 yield encargs, encresref
147 # Assuming the future to be filled with the result from the batched
148 # request now. Decode it:
149 yield decode(encresref.value)
150
151 The decorator returns a function which wraps this coroutine as a plain
152 method, but adds the original method as an attribute called "batchable",
153 which is used by remotebatch to split the call into separate encoding and
154 decoding phases.
155 '''
156 def plain(*args, **opts):
157 batchable = f(*args, **opts)
158 encargsorres, encresref = batchable.next()
159 if not encresref:
160 return encargsorres # a local result in this case
161 self = args[0]
162 encresref.set(self._submitone(f.func_name, encargsorres))
163 return batchable.next()
164 setattr(plain, 'batchable', f)
165 return plain
166
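A sketch of the calling convention the batching machinery gives callers; `demopeer` is an invented stand-in for a real peer object, and `localbatch` and `future` are the classes defined above:

    class demopeer(object):
        def lookup(self, key):
            return 'resolved:' + key

    b = localbatch(demopeer())
    fut = b.lookup('tip')   # queued; returns a future immediately
    b.submit()              # every queued call runs here
    assert fut.value == 'resolved:tip'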
167 # list of nodes encoding / decoding
168
169 def decodelist(l, sep=' '):
170 if l:
171 return map(bin, l.split(sep))
172 return []
173
174 def encodelist(l, sep=' '):
175 return sep.join(map(hex, l))
176
177 # batched call argument encoding
178
179 def escapearg(plain):
180 return (plain
181 .replace(':', '::')
182 .replace(',', ':,')
183 .replace(';', ':;')
184 .replace('=', ':='))
185
186 def unescapearg(escaped):
187 return (escaped
188 .replace(':=', '=')
189 .replace(':;', ';')
190 .replace(':,', ',')
191 .replace('::', ':'))
192
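These escapes keep user data from colliding with the batch framing characters: ';' separates commands, ',' separates arguments, and '=' splits each name/value pair. A quick round-trip check:

    raw = 'key=a;b,c:d'
    wire = escapearg(raw)
    assert wire == 'key:=a:;b:,c::d'
    assert unescapearg(wire) == raw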
193 # mapping of options accepted by getbundle and their types
194 #
195 # Meant to be extended by extensions. It is the extensions' responsibility to
196 # ensure such options are properly processed in exchange.getbundle.
197 #
198 # supported types are:
199 #
200 # :nodes: list of binary nodes
201 # :csv: list of comma-separated values
202 # :plain: string with no transformation needed.
203 gboptsmap = {'heads': 'nodes',
204 'common': 'nodes',
205 'bundlecaps': 'csv',
206 'listkeys': 'csv'}
206 'listkeys': 'csv',
207 'cg': 'boolean'}
208
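The new 'cg' entry uses the 'boolean' option type alongside the documented nodes, csv and plain types. A sketch of how the map drives client-side encoding, mirroring the loop in wirepeer.getbundle below (the argument values are invented):

    def encodeopts(kwargs):
        opts = {}
        for key, value in kwargs.items():
            if value is None:
                continue            # unset options are not sent at all
            keytype = gboptsmap.get(key)
            if keytype == 'nodes':
                opts[key] = encodelist(value)
            elif keytype == 'csv':
                opts[key] = ','.join(value)
            elif keytype == 'boolean':
                opts[key] = bool(value)
            else:
                opts[key] = value   # (error handling omitted in this sketch)
        return opts

    assert (encodeopts({'bundlecaps': ['HG2X'], 'cg': False, 'common': None})
            == {'bundlecaps': 'HG2X', 'cg': False})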
209 # client side
210
211 class wirepeer(peer.peerrepository):
212
213 def batch(self):
214 return remotebatch(self)
215 def _submitbatch(self, req):
216 cmds = []
217 for op, argsdict in req:
218 args = ','.join('%s=%s' % p for p in argsdict.iteritems())
219 cmds.append('%s %s' % (op, args))
220 rsp = self._call("batch", cmds=';'.join(cmds))
221 return rsp.split(';')
222 def _submitone(self, op, args):
223 return self._call(op, **args)
224
225 @batchable
226 def lookup(self, key):
227 self.requirecap('lookup', _('look up remote revision'))
228 f = future()
229 yield {'key': encoding.fromlocal(key)}, f
230 d = f.value
231 success, data = d[:-1].split(" ", 1)
232 if int(success):
233 yield bin(data)
234 self._abort(error.RepoError(data))
235
236 @batchable
237 def heads(self):
238 f = future()
239 yield {}, f
240 d = f.value
241 try:
242 yield decodelist(d[:-1])
243 except ValueError:
244 self._abort(error.ResponseError(_("unexpected response:"), d))
245
246 @batchable
247 def known(self, nodes):
248 f = future()
249 yield {'nodes': encodelist(nodes)}, f
250 d = f.value
251 try:
252 yield [bool(int(f)) for f in d]
253 except ValueError:
254 self._abort(error.ResponseError(_("unexpected response:"), d))
255
256 @batchable
257 def branchmap(self):
258 f = future()
259 yield {}, f
260 d = f.value
261 try:
262 branchmap = {}
263 for branchpart in d.splitlines():
264 branchname, branchheads = branchpart.split(' ', 1)
265 branchname = encoding.tolocal(urllib.unquote(branchname))
266 branchheads = decodelist(branchheads)
267 branchmap[branchname] = branchheads
268 yield branchmap
269 except TypeError:
270 self._abort(error.ResponseError(_("unexpected response:"), d))
271
272 def branches(self, nodes):
273 n = encodelist(nodes)
274 d = self._call("branches", nodes=n)
275 try:
276 br = [tuple(decodelist(b)) for b in d.splitlines()]
277 return br
278 except ValueError:
279 self._abort(error.ResponseError(_("unexpected response:"), d))
280
281 def between(self, pairs):
282 batch = 8 # avoid giant requests
283 r = []
284 for i in xrange(0, len(pairs), batch):
285 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
286 d = self._call("between", pairs=n)
287 try:
288 r.extend(l and decodelist(l) or [] for l in d.splitlines())
289 except ValueError:
290 self._abort(error.ResponseError(_("unexpected response:"), d))
291 return r
292
293 @batchable
294 def pushkey(self, namespace, key, old, new):
295 if not self.capable('pushkey'):
296 yield False, None
297 f = future()
298 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
299 yield {'namespace': encoding.fromlocal(namespace),
300 'key': encoding.fromlocal(key),
301 'old': encoding.fromlocal(old),
302 'new': encoding.fromlocal(new)}, f
303 d = f.value
304 d, output = d.split('\n', 1)
305 try:
306 d = bool(int(d))
307 except ValueError:
308 raise error.ResponseError(
309 _('push failed (unexpected response):'), d)
310 for l in output.splitlines(True):
311 self.ui.status(_('remote: '), l)
312 yield d
313
314 @batchable
315 def listkeys(self, namespace):
316 if not self.capable('pushkey'):
317 yield {}, None
318 f = future()
319 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
320 yield {'namespace': encoding.fromlocal(namespace)}, f
321 d = f.value
322 yield pushkeymod.decodekeys(d)
323
324 def stream_out(self):
325 return self._callstream('stream_out')
326
327 def changegroup(self, nodes, kind):
328 n = encodelist(nodes)
329 f = self._callcompressable("changegroup", roots=n)
330 return changegroupmod.unbundle10(f, 'UN')
331
332 def changegroupsubset(self, bases, heads, kind):
333 self.requirecap('changegroupsubset', _('look up remote changes'))
334 bases = encodelist(bases)
335 heads = encodelist(heads)
336 f = self._callcompressable("changegroupsubset",
337 bases=bases, heads=heads)
338 return changegroupmod.unbundle10(f, 'UN')
339
340 def getbundle(self, source, **kwargs):
341 self.requirecap('getbundle', _('look up remote changes'))
342 opts = {}
343 for key, value in kwargs.iteritems():
344 if value is None:
345 continue
346 keytype = gboptsmap.get(key)
347 if keytype is None:
348 assert False, 'unexpected'
349 elif keytype == 'nodes':
350 value = encodelist(value)
351 elif keytype == 'csv':
352 value = ','.join(value)
353 elif keytype == 'boolean':
354 value = bool(value)
355 elif keytype != 'plain':
356 raise KeyError('unknown getbundle option type %s'
357 % keytype)
358 opts[key] = value
359 f = self._callcompressable("getbundle", **opts)
360 bundlecaps = kwargs.get('bundlecaps')
361 if bundlecaps is not None and 'HG2X' in bundlecaps:
362 return bundle2.unbundle20(self.ui, f)
363 else:
364 return changegroupmod.unbundle10(f, 'UN')
365
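With the commit's new option, a caller can now ask a peer for a bundle without the changegroup part, e.g. to fetch only listkeys data over bundle2. A hypothetical invocation (`remote` stands for any wirepeer; the argument values are illustrative only):

    # bundle = remote.getbundle('pull',
    #                           heads=remoteheads,        # binary node lists
    #                           common=commonnodes,
    #                           bundlecaps=set(['HG2X']),
    #                           listkeys=['phases'],
    #                           cg=False)                 # omit the changegroup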
366 def unbundle(self, cg, heads, source):
367 '''Send cg (a readable file-like object representing the
368 changegroup to push, typically a chunkbuffer object) to the
369 remote server as a bundle.
370
371 When pushing a bundle10 stream, return an integer indicating the
372 result of the push (see localrepository.addchangegroup()).
373
374 When pushing a bundle20 stream, return a bundle20 stream.'''
375
376 if heads != ['force'] and self.capable('unbundlehash'):
377 heads = encodelist(['hashed',
378 util.sha1(''.join(sorted(heads))).digest()])
379 else:
380 heads = encodelist(heads)
381
382 if util.safehasattr(cg, 'deltaheader'):
383 # this is a bundle10, do the old style call sequence
384 ret, output = self._callpush("unbundle", cg, heads=heads)
385 if ret == "":
386 raise error.ResponseError(
387 _('push failed:'), output)
388 try:
389 ret = int(ret)
390 except ValueError:
391 raise error.ResponseError(
392 _('push failed (unexpected response):'), ret)
393
394 for l in output.splitlines(True):
395 self.ui.status(_('remote: '), l)
396 else:
397 # bundle2 push. Send a stream, fetch a stream.
398 stream = self._calltwowaystream('unbundle', cg, heads=heads)
399 ret = bundle2.unbundle20(self.ui, stream)
400 return ret
401
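The 'unbundlehash' branch above replaces a potentially long head list with a single digest. A standalone illustration using hashlib directly (util.sha1 is essentially Mercurial's wrapper around it; the node values are fake):

    import hashlib

    fakeheads = ['\x11' * 20, '\x22' * 20]      # 20-byte binary node ids
    digest = hashlib.sha1(''.join(sorted(fakeheads))).digest()
    wirevalue = encodelist(['hashed', digest])  # what goes over the wire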
402 def debugwireargs(self, one, two, three=None, four=None, five=None):
403 # don't pass optional arguments left at their default value
404 opts = {}
405 if three is not None:
406 opts['three'] = three
407 if four is not None:
408 opts['four'] = four
409 return self._call('debugwireargs', one=one, two=two, **opts)
410
411 def _call(self, cmd, **args):
412 """execute <cmd> on the server
413
414 The command is expected to return a simple string.
415
416 returns the server reply as a string."""
417 raise NotImplementedError()
418
419 def _callstream(self, cmd, **args):
420 """execute <cmd> on the server
421
422 The command is expected to return a stream.
423
424 returns the server reply as a file like object."""
425 raise NotImplementedError()
426
427 def _callcompressable(self, cmd, **args):
428 """execute <cmd> on the server
429
430 The command is expected to return a stream.
431
432 The stream may have been compressed in some implementations. This
433 function takes care of the decompression. This is the only difference
434 with _callstream.
435
436 returns the server reply as a file like object.
437 """
438 raise NotImplementedError()
439
440 def _callpush(self, cmd, fp, **args):
441 """execute a <cmd> on server
442
443 The command is expected to be related to a push. Push has a special
444 return method.
445
446 returns the server reply as a (ret, output) tuple. ret is either
447 empty (error) or a stringified int.
448 """
449 raise NotImplementedError()
450
451 def _calltwowaystream(self, cmd, fp, **args):
452 """execute <cmd> on server
453
454 The command will send a stream to the server and get a stream in reply.
455 """
456 raise NotImplementedError()
457
458 def _abort(self, exception):
459 """clearly abort the wire protocol connection and raise the exception
460 """
461 raise NotImplementedError()
462
463 # server side
464
465 # wire protocol commands can either return a string or one of these classes.
466 class streamres(object):
467 """wireproto reply: binary stream
468
469 The call was successful and the result is a stream.
470 Iterate on the `self.gen` attribute to retrieve chunks.
471 """
472 def __init__(self, gen):
473 self.gen = gen
474
475 class pushres(object):
476 """wireproto reply: success with simple integer return
477
478 The call was successful and returned an integer contained in `self.res`.
479 """
480 def __init__(self, res):
481 self.res = res
482
483 class pusherr(object):
484 """wireproto reply: failure
485
486 The call failed. The `self.res` attribute contains the error message.
487 """
488 def __init__(self, res):
489 self.res = res
490
491 class ooberror(object):
492 """wireproto reply: failure of a batch of operations
493
494 Something failed during a batch call. The error message is stored in
495 `self.message`.
496 """
497 def __init__(self, message):
498 self.message = message
499
500 def dispatch(repo, proto, command):
501 repo = repo.filtered("served")
502 func, spec = commands[command]
503 args = proto.getargs(spec)
504 return func(repo, proto, *args)
505
506 def options(cmd, keys, others):
507 opts = {}
508 for k in keys:
509 if k in others:
510 opts[k] = others[k]
511 del others[k]
512 if others:
513 sys.stderr.write("warning: %s ignored unexpected arguments %s\n"
514 % (cmd, ",".join(others)))
515 return opts
516
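A behavior sketch for options(): known keys are moved into the returned dict, and anything left in the `others` dict only triggers the stderr warning (the values here are invented):

    extra = {'three': '3', 'bogus': 'x'}
    opts = options('debugwireargs', ['three', 'four'], extra)
    assert opts == {'three': '3'}
    assert extra == {'bogus': 'x'}   # left behind, warned about, ignored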
517 # list of commands
518 commands = {}
519
520 def wireprotocommand(name, args=''):
521 """decorator for wire protocol command"""
522 def register(func):
523 commands[name] = (func, args)
524 return func
525 return register
526
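Registration is a plain dict insert, which is what makes the command table easy for extensions to grow. A hypothetical command (name, argument spec and behavior are invented for illustration):

    @wireprotocommand('echotest', 'text')
    def echotest(repo, proto, text):
        return 'echo:%s\n' % text

    assert commands['echotest'] == (echotest, 'text')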
527 @wireprotocommand('batch', 'cmds *')
528 def batch(repo, proto, cmds, others):
529 repo = repo.filtered("served")
530 res = []
531 for pair in cmds.split(';'):
532 op, args = pair.split(' ', 1)
533 vals = {}
534 for a in args.split(','):
535 if a:
536 n, v = a.split('=')
537 vals[n] = unescapearg(v)
538 func, spec = commands[op]
539 if spec:
540 keys = spec.split()
541 data = {}
542 for k in keys:
543 if k == '*':
544 star = {}
545 for key in vals.keys():
546 if key not in keys:
547 star[key] = vals[key]
548 data['*'] = star
549 else:
550 data[k] = vals[k]
551 result = func(repo, proto, *[data[k] for k in keys])
552 else:
553 result = func(repo, proto)
554 if isinstance(result, ooberror):
555 return result
556 res.append(escapearg(result))
557 return ';'.join(res)
558
559 @wireprotocommand('between', 'pairs')
560 def between(repo, proto, pairs):
561 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
562 r = []
563 for b in repo.between(pairs):
564 r.append(encodelist(b) + "\n")
565 return "".join(r)
566
567 @wireprotocommand('branchmap')
568 def branchmap(repo, proto):
569 branchmap = repo.branchmap()
570 heads = []
571 for branch, nodes in branchmap.iteritems():
572 branchname = urllib.quote(encoding.fromlocal(branch))
573 branchnodes = encodelist(nodes)
574 heads.append('%s %s' % (branchname, branchnodes))
575 return '\n'.join(heads)
576
577 @wireprotocommand('branches', 'nodes')
578 def branches(repo, proto, nodes):
579 nodes = decodelist(nodes)
580 r = []
581 for b in repo.branches(nodes):
582 r.append(encodelist(b) + "\n")
583 return "".join(r)
584
585
586 'known', 'getbundle', 'unbundlehash', 'batch']
587 'known', 'getbundle', 'unbundlehash', 'batch']
587
588
588 def _capabilities(repo, proto):
589 def _capabilities(repo, proto):
589 """return a list of capabilities for a repo
590 """return a list of capabilities for a repo
590
591
591 This function exists to allow extensions to easily wrap capabilities
592 This function exists to allow extensions to easily wrap capabilities
592 computation
593 computation
593
594
594 - returns a lists: easy to alter
595 - returns a lists: easy to alter
595 - change done here will be propagated to both `capabilities` and `hello`
596 - change done here will be propagated to both `capabilities` and `hello`
596 command without any other action needed.
597 command without any other action needed.
597 """
598 """
598 # copy to prevent modification of the global list
599 # copy to prevent modification of the global list
599 caps = list(wireprotocaps)
600 caps = list(wireprotocaps)
600 if _allowstream(repo.ui):
601 if _allowstream(repo.ui):
601 if repo.ui.configbool('server', 'preferuncompressed', False):
602 if repo.ui.configbool('server', 'preferuncompressed', False):
602 caps.append('stream-preferred')
603 caps.append('stream-preferred')
603 requiredformats = repo.requirements & repo.supportedformats
604 requiredformats = repo.requirements & repo.supportedformats
604 # if our local revlogs are just revlogv1, add 'stream' cap
605 # if our local revlogs are just revlogv1, add 'stream' cap
605 if not requiredformats - set(('revlogv1',)):
606 if not requiredformats - set(('revlogv1',)):
606 caps.append('stream')
607 caps.append('stream')
607 # otherwise, add 'streamreqs' detailing our local revlog format
608 # otherwise, add 'streamreqs' detailing our local revlog format
608 else:
609 else:
609 caps.append('streamreqs=%s' % ','.join(requiredformats))
610 caps.append('streamreqs=%s' % ','.join(requiredformats))
610 if repo.ui.configbool('experimental', 'bundle2-exp', False):
611 if repo.ui.configbool('experimental', 'bundle2-exp', False):
611 capsblob = bundle2.encodecaps(repo.bundle2caps)
612 capsblob = bundle2.encodecaps(repo.bundle2caps)
612 caps.append('bundle2-exp=' + urllib.quote(capsblob))
613 caps.append('bundle2-exp=' + urllib.quote(capsblob))
613 caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
614 caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
614 caps.append('httpheader=1024')
615 caps.append('httpheader=1024')
615 return caps
616 return caps
616
617
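The resulting capability advertisement is a single space-separated line. What a server might send (illustrative only; the exact tokens depend on the repository's configuration and version):

    # lookup changegroupsubset branchmap pushkey known getbundle
    # unbundlehash batch stream unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024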
618 # If you are writing an extension and considering wrapping this function,
619 # wrap `_capabilities` instead.
620 @wireprotocommand('capabilities')
621 def capabilities(repo, proto):
622 return ' '.join(_capabilities(repo, proto))
623
624 @wireprotocommand('changegroup', 'roots')
625 def changegroup(repo, proto, roots):
626 nodes = decodelist(roots)
627 cg = changegroupmod.changegroup(repo, nodes, 'serve')
628 return streamres(proto.groupchunks(cg))
629
630 @wireprotocommand('changegroupsubset', 'bases heads')
631 def changegroupsubset(repo, proto, bases, heads):
632 bases = decodelist(bases)
633 heads = decodelist(heads)
634 cg = changegroupmod.changegroupsubset(repo, bases, heads, 'serve')
635 return streamres(proto.groupchunks(cg))
636
637 @wireprotocommand('debugwireargs', 'one two *')
638 def debugwireargs(repo, proto, one, two, others):
639 # only accept optional args from the known set
640 opts = options('debugwireargs', ['three', 'four'], others)
641 return repo.debugwireargs(one, two, **opts)
642
643 # List of options accepted by getbundle.
644 #
645 # Meant to be extended by extensions. It is the extension's responsibility to
646 # ensure such options are properly processed in exchange.getbundle.
647 gboptslist = ['heads', 'common', 'bundlecaps']
648
649 @wireprotocommand('getbundle', '*')
650 def getbundle(repo, proto, others):
651 opts = options('getbundle', gboptsmap.keys(), others)
652 for k, v in opts.iteritems():
653 keytype = gboptsmap[k]
654 if keytype == 'nodes':
655 opts[k] = decodelist(v)
656 elif keytype == 'csv':
657 opts[k] = set(v.split(','))
658 elif keytype == 'boolean':
659 opts[k] = '%i' % bool(v)
660 elif keytype != 'plain':
661 raise KeyError('unknown getbundle option type %s'
662 % keytype)
663 cg = exchange.getbundle(repo, 'serve', **opts)
664 return streamres(proto.groupchunks(cg))
665
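A decoding sketch mirroring the server-side loop above, fed hypothetical wire strings. Note that bool(v) is True for any non-empty string, so it is the presence or emptiness of the raw value, not its spelling, that decides the '%i' result here:

    incoming = {'heads': encodelist(['\x11' * 20]),
                'bundlecaps': 'HG2X,foo',
                'cg': '1'}
    decoded = {}
    for k, v in incoming.items():
        keytype = gboptsmap[k]
        if keytype == 'nodes':
            decoded[k] = decodelist(v)
        elif keytype == 'csv':
            decoded[k] = set(v.split(','))
        elif keytype == 'boolean':
            decoded[k] = '%i' % bool(v)
    assert decoded['cg'] == '1'
    assert decoded['bundlecaps'] == set(['HG2X', 'foo'])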
666 @wireprotocommand('heads')
667 def heads(repo, proto):
668 h = repo.heads()
669 return encodelist(h) + "\n"
670
671 @wireprotocommand('hello')
672 def hello(repo, proto):
673 '''the hello command returns a set of lines describing various
674 interesting things about the server, in an RFC822-like format.
675 Currently the only one defined is "capabilities", which
676 consists of a line in the form:
677
678 capabilities: space separated list of tokens
679 '''
680 return "capabilities: %s\n" % (capabilities(repo, proto))
681
682 @wireprotocommand('listkeys', 'namespace')
683 def listkeys(repo, proto, namespace):
684 d = repo.listkeys(encoding.tolocal(namespace)).items()
685 return pushkeymod.encodekeys(d)
686
687 @wireprotocommand('lookup', 'key')
688 def lookup(repo, proto, key):
689 try:
690 k = encoding.tolocal(key)
691 c = repo[k]
692 r = c.hex()
693 success = 1
694 except Exception, inst:
695 r = str(inst)
696 success = 0
697 return "%s %s\n" % (success, r)
698
699 @wireprotocommand('known', 'nodes *')
700 def known(repo, proto, nodes, others):
701 return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
702
703 @wireprotocommand('pushkey', 'namespace key old new')
704 def pushkey(repo, proto, namespace, key, old, new):
705 # compatibility with pre-1.8 clients which were accidentally
706 # sending raw binary nodes rather than utf-8-encoded hex
707 if len(new) == 20 and new.encode('string-escape') != new:
708 # looks like it could be a binary node
709 try:
710 new.decode('utf-8')
711 new = encoding.tolocal(new) # but cleanly decodes as UTF-8
712 except UnicodeDecodeError:
713 pass # binary, leave unmodified
714 else:
715 new = encoding.tolocal(new) # normal path
716
717 if util.safehasattr(proto, 'restore'):
718
719 proto.redirect()
720
721 try:
722 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
723 encoding.tolocal(old), new) or False
724 except util.Abort:
725 r = False
726
727 output = proto.restore()
728
729 return '%s\n%s' % (int(r), output)
730
731 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
732 encoding.tolocal(old), new)
733 return '%s\n' % int(r)
734
733
734
734 def _allowstream(ui):
735 def _allowstream(ui):
735 return ui.configbool('server', 'uncompressed', True, untrusted=True)
736 return ui.configbool('server', 'uncompressed', True, untrusted=True)
736
737
737 def _walkstreamfiles(repo):
738 def _walkstreamfiles(repo):
738 # this is it's own function so extensions can override it
739 # this is it's own function so extensions can override it
739 return repo.store.walk()
740 return repo.store.walk()
740
741
741 @wireprotocommand('stream_out')
742 @wireprotocommand('stream_out')
742 def stream(repo, proto):
743 def stream(repo, proto):
743 '''If the server supports streaming clone, it advertises the "stream"
744 '''If the server supports streaming clone, it advertises the "stream"
744 capability with a value representing the version and flags of the repo
745 capability with a value representing the version and flags of the repo
745 it is serving. Client checks to see if it understands the format.
746 it is serving. Client checks to see if it understands the format.
746
747
747 The format is simple: the server writes out a line with the amount
748 The format is simple: the server writes out a line with the amount
748 of files, then the total amount of bytes to be transferred (separated
749 of files, then the total amount of bytes to be transferred (separated
749 by a space). Then, for each file, the server first writes the filename
750 by a space). Then, for each file, the server first writes the filename
750 and file size (separated by the null character), then the file contents.
751 and file size (separated by the null character), then the file contents.
751 '''
752 '''
752
753
753 if not _allowstream(repo.ui):
754 if not _allowstream(repo.ui):
754 return '1\n'
755 return '1\n'
755
756
756 entries = []
757 entries = []
757 total_bytes = 0
758 total_bytes = 0
758 try:
759 try:
759 # get consistent snapshot of repo, lock during scan
760 # get consistent snapshot of repo, lock during scan
760 lock = repo.lock()
761 lock = repo.lock()
761 try:
762 try:
762 repo.ui.debug('scanning\n')
763 repo.ui.debug('scanning\n')
763 for name, ename, size in _walkstreamfiles(repo):
764 for name, ename, size in _walkstreamfiles(repo):
764 if size:
765 if size:
765 entries.append((name, size))
766 entries.append((name, size))
766 total_bytes += size
767 total_bytes += size
767 finally:
768 finally:
768 lock.release()
769 lock.release()
769 except error.LockError:
770 except error.LockError:
770 return '2\n' # error: 2
771 return '2\n' # error: 2
771
772
772 def streamer(repo, entries, total):
773 def streamer(repo, entries, total):
773 '''stream out all metadata files in repository.'''
774 '''stream out all metadata files in repository.'''
774 yield '0\n' # success
775 yield '0\n' # success
775 repo.ui.debug('%d files, %d bytes to transfer\n' %
776 repo.ui.debug('%d files, %d bytes to transfer\n' %
776 (len(entries), total_bytes))
777 (len(entries), total_bytes))
777 yield '%d %d\n' % (len(entries), total_bytes)
778 yield '%d %d\n' % (len(entries), total_bytes)
778
779
779 sopener = repo.sopener
780 sopener = repo.sopener
780 oldaudit = sopener.mustaudit
781 oldaudit = sopener.mustaudit
781 debugflag = repo.ui.debugflag
782 debugflag = repo.ui.debugflag
782 sopener.mustaudit = False
783 sopener.mustaudit = False
783
784
784 try:
785 try:
785 for name, size in entries:
786 for name, size in entries:
786 if debugflag:
787 if debugflag:
787 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
788 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
788 # partially encode name over the wire for backwards compat
789 # partially encode name over the wire for backwards compat
789 yield '%s\0%d\n' % (store.encodedir(name), size)
790 yield '%s\0%d\n' % (store.encodedir(name), size)
790 if size <= 65536:
791 if size <= 65536:
791 fp = sopener(name)
792 fp = sopener(name)
792 try:
793 try:
793 data = fp.read(size)
794 data = fp.read(size)
794 finally:
795 finally:
795 fp.close()
796 fp.close()
796 yield data
797 yield data
797 else:
798 else:
798 for chunk in util.filechunkiter(sopener(name), limit=size):
799 for chunk in util.filechunkiter(sopener(name), limit=size):
799 yield chunk
800 yield chunk
800 # replace with "finally:" when support for python 2.4 has been dropped
801 # replace with "finally:" when support for python 2.4 has been dropped
801 except Exception:
802 except Exception:
802 sopener.mustaudit = oldaudit
803 sopener.mustaudit = oldaudit
803 raise
804 raise
804 sopener.mustaudit = oldaudit
805 sopener.mustaudit = oldaudit
805
806
806 return streamres(streamer(repo, entries, total_bytes))
807 return streamres(streamer(repo, entries, total_bytes))
807
808
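A minimal standalone parser for the stream format documented in the docstring above, fed a hand-built payload instead of a real repository:

    payload = '0\n' '1 5\n' 'data/a.i\x005\n' 'hello'
    lines = payload.split('\n')
    assert lines[0] == '0'                      # status line: success
    filecount, totalbytes = map(int, lines[1].split(' '))
    name, size = lines[2].split('\x00')         # filename \0 size
    body = payload.split('\n', 3)[3][:int(size)]
    assert (filecount, totalbytes, name, body) == (1, 5, 'data/a.i', 'hello')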
809 @wireprotocommand('unbundle', 'heads')
810 def unbundle(repo, proto, heads):
811 their_heads = decodelist(heads)
812
813 try:
814 proto.redirect()
815
816 exchange.check_heads(repo, their_heads, 'preparing changes')
817
818 # write bundle data to temporary file because it can be big
819 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
820 fp = os.fdopen(fd, 'wb+')
821 r = 0
822 try:
823 proto.getfile(fp)
824 fp.seek(0)
825 gen = exchange.readbundle(repo.ui, fp, None)
826 r = exchange.unbundle(repo, gen, their_heads, 'serve',
827 proto._client())
828 if util.safehasattr(r, 'addpart'):
829 # The return looks streamable, we are in the bundle2 case and
830 # should return a stream.
831 return streamres(r.getchunks())
832 return pushres(r)
833
834 finally:
835 fp.close()
836 os.unlink(tempname)
837 except error.BundleValueError, exc:
838 bundler = bundle2.bundle20(repo.ui)
839 errpart = bundler.newpart('B2X:ERROR:UNSUPPORTEDCONTENT')
840 if exc.parttype is not None:
841 errpart.addparam('parttype', exc.parttype)
842 if exc.params:
843 errpart.addparam('params', '\0'.join(exc.params))
844 return streamres(bundler.getchunks())
845 except util.Abort, inst:
846 # The old code we moved used sys.stderr directly.
847 # We did not change it, to minimise the code change.
848 # This needs to be moved to something proper.
849 # Feel free to do it.
850 if getattr(inst, 'duringunbundle2', False):
851 bundler = bundle2.bundle20(repo.ui)
852 manargs = [('message', str(inst))]
853 advargs = []
854 if inst.hint is not None:
855 advargs.append(('hint', inst.hint))
856 bundler.addpart(bundle2.bundlepart('B2X:ERROR:ABORT',
857 manargs, advargs))
858 return streamres(bundler.getchunks())
859 else:
860 sys.stderr.write("abort: %s\n" % inst)
861 return pushres(0)
862 except error.PushRaced, exc:
863 if getattr(exc, 'duringunbundle2', False):
864 bundler = bundle2.bundle20(repo.ui)
865 bundler.newpart('B2X:ERROR:PUSHRACED', [('message', str(exc))])
866 return streamres(bundler.getchunks())
867 else:
868 return pusherr(str(exc))