getbundle: support of listkeys argument when bundle2 is used...
Pierre-Yves David
r21657:0ff44e06 default
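
For context, a minimal sketch of what the new argument enables (illustrative only, not part of the changeset; `repo` is assumed to be a local repository object and the namespace names are examples). With a bundle2-capable setup, a caller can pass a `listkeys` argument to exchange.getbundle and get back one `b2x:listkeys` part per requested pushkey namespace, alongside the changegroup part:

    # Illustrative sketch against the 2014-era experimental bundle2 API.
    from mercurial import exchange
    from mercurial.node import nullid

    caps = exchange.caps20to10(repo)   # advertises HG2X plus the repo's bundle2 caps
    stream = exchange.getbundle(repo, 'pull',
                                heads=repo.heads(), common=[nullid],
                                bundlecaps=caps,
                                listkeys=['phases', 'bookmarks'])
    # 'stream' is a bundle2 stream containing a 'b2x:changegroup' part plus
    # one 'b2x:listkeys' part per namespace, each payload encoded with
    # pushkey.encodekeys().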
--- a/mercurial/exchange.py
+++ b/mercurial/exchange.py
@@ -1,740 +1,746 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
from node import hex, nullid
import errno, urllib
import util, scmutil, changegroup, base85, error
-import discovery, phases, obsolete, bookmarks, bundle2
+import discovery, phases, obsolete, bookmarks, bundle2, pushkey

def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, alg)
    elif version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))


class pushoperation(object):
42 """A object that represent a single push operation
42 """A object that represent a single push operation
43
43
44 It purpose is to carry push related state and very common operation.
44 It purpose is to carry push related state and very common operation.
45
45
46 A new should be created at the beginning of each push and discarded
46 A new should be created at the beginning of each push and discarded
47 afterward.
47 afterward.
48 """
48 """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # set of all heads common after changeset bundle push
        self.commonheads = None

def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _pushcheckoutgoing(pushop):
                pushop.repo.prepushoutgoinghooks(pushop.repo,
                                                 pushop.remote,
                                                 pushop.outgoing)
                if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                              False)
                    and pushop.remote.capable('bundle2-exp')):
                    _pushbundle2(pushop)
                else:
                    _pushchangeset(pushop)
            _pushcomputecommonheads(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    _pushbookmark(pushop)
    return pushop.ret

def _pushdiscovery(pushop):
    # discovery
    unfi = pushop.repo.unfiltered()
    fci = discovery.findcommonincoming
    commoninc = fci(unfi, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message is here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If there is at least one obsolete or unstable
            # changeset in missing, at least one of the missing
            # heads will be obsolete or unstable. So checking
            # heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True

def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # create reply capability
    capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
    bundler.newpart('b2x:replycaps', data=capsblob)
    # Send known heads to the server for race detection.
    if not pushop.force:
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    extrainfo = _pushbundle2extraparts(pushop, bundler)
    # add the changegroup bundle
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        op = bundle2.processbundle(pushop.repo, reply)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    cgreplies = op.records.getreplies(cgpart.id)
    assert len(cgreplies['changegroup']) == 1
    pushop.ret = cgreplies['changegroup'][0]['return']
    _pushbundle2extrareply(pushop, op, extrainfo)

def _pushbundle2extraparts(pushop, bundler):
    """hook function to let extensions add parts

    Return a dict to let extensions pass data to the reply processing.
    """
    return {}

def _pushbundle2extrareply(pushop, op, extrainfo):
    """hook function to let extensions react to part replies

    The dict from _pushbundle2extraparts is fed to this function.
    """
    pass

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            'push')
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())

def _pushcomputecommonheads(pushop):
    unfi = pushop.repo.unfiltered()
    if pushop.ret:
        # push succeeded, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # all-out push failed; synchronize on all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    unfi = pushop.repo.unfiltered()
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # we may be in issue 3871 case!
        # We drop the courtesy phase synchronisation that would
        # publish changesets which may still be draft locally
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # Get the list of all revs draft on remote by public here.
        # XXX Beware that this revset breaks if droots is not strictly
        # XXX roots; we may want to ensure it is, but that is costly
        outdated = unfi.set('heads((%ln::%ln) and public())',
                            droots, cheads)
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    if (obsolete._enabled and repo.obsstore and
        'obsolete' in remote.listkeys('namespaces')):
        rslts = []
        remotedata = repo.listkeys('obsolete')
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
     ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                           srchex=hex)

    for b, scid, dcid in advsrc:
        if ancestors and repo[scid].rev() not in ancestors:
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)

class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name of the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps remaining to do (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

def pull(repo, remote, heads=None, force=False):
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
            and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult

def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; it will be changed to
    handle all discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    pullop.common, pullop.fetch, pullop.rheads = tmp

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data is the changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.todosteps.remove('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    _pullbundle2extraprepare(pullop, kwargs)
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        assert len(op.records['changegroup']) == 1
        pullop.cgresult = op.records['changegroup'][0]['return']

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""
    pass

def _pullchangeset(pullop):
    """pull changesets from unbundle into the local repo"""
    # We delay the opening of the transaction as late as possible so we
    # don't open a transaction for nothing; otherwise you break future
    # useful rollback calls
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())

def _pullphase(pullop):
    # Get remote phases data from remote
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    pullop.todosteps.remove('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        phases.advanceboundary(pullop.repo, phases.public, pheads)
        phases.advanceboundary(pullop.repo, phases.draft,
                               pullop.pulledsubset)
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        phases.advanceboundary(pullop.repo, phases.public,
                               pullop.pulledsubset)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` function returns the pull transaction, creating one
    if necessary. We return the transaction to inform the calling code that
    a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes."""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr

def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = set(['HG2X'])
    capsblob = bundle2.encodecaps(repo.bundle2caps)
    caps.add('bundle2=' + urllib.quote(capsblob))
    return caps

def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
650 """return a full bundle (with potentially multiple kind of parts)
650 """return a full bundle (with potentially multiple kind of parts)
651
651
652 Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
652 Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
653 passed. For now, the bundle can contain only changegroup, but this will
653 passed. For now, the bundle can contain only changegroup, but this will
654 changes when more part type will be available for bundle2.
654 changes when more part type will be available for bundle2.
655
655
656 This is different from changegroup.getbundle that only returns an HG10
656 This is different from changegroup.getbundle that only returns an HG10
657 changegroup bundle. They may eventually get reunited in the future when we
657 changegroup bundle. They may eventually get reunited in the future when we
658 have a clearer idea of the API we what to query different data.
658 have a clearer idea of the API we what to query different data.
659
659
660 The implementation is at a very early stage and will get massive rework
660 The implementation is at a very early stage and will get massive rework
661 when the API of bundle is refined.
661 when the API of bundle is refined.
662 """
662 """
    # build changegroup bundle here.
    cg = changegroup.getbundle(repo, source, heads=heads,
                               common=common, bundlecaps=bundlecaps)
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    if cg:
        bundler.newpart('b2x:changegroup', data=cg.getchunks())
+    listkeys = kwargs.get('listkeys', ())
+    for namespace in listkeys:
+        part = bundler.newpart('b2x:listkeys')
+        part.addparam('namespace', namespace)
+        keys = repo.listkeys(namespace).items()
+        part.data = pushkey.encodekeys(keys)
    _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
                        bundlecaps=bundlecaps, **kwargs)
    return util.chunkbuffer(bundler.getchunks())

def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
                        bundlecaps=None, **kwargs):
    """hook function to let extensions add parts to the requested bundle"""
    pass

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                cl = repo.unfiltered().changelog
                p = cl.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True, source=source,
                          url=url, pending=p, **tr.hookargs)
                tr.close()
                repo.hook('b2x-transactionclose', source=source, url=url,
                          **tr.hookargs)
            except Exception, exc:
                exc.duringunbundle2 = True
                raise
        else:
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
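
To complement the part generation added above, here is a minimal sketch of what a consumer could look like (an assumption for illustration; this changeset only adds the generating side). The payload is produced by pushkey.encodekeys, so pushkey's decodekeys counterpart reverses it:

    # Hypothetical part handler sketch for the receiving side.
    from mercurial import bundle2, pushkey

    @bundle2.parthandler('b2x:listkeys')
    def handlelistkeys(op, inpart):
        namespace = inpart.params['namespace']
        keys = pushkey.decodekeys(inpart.read())
        # make the decoded namespace content available to the caller of
        # bundle2.processbundle() via the operation records
        op.records.add('listkeys', (namespace, keys))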
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,1773 +1,1774 @@
1 # localrepo.py - read/write repository class for mercurial
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered
    version of the repo"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

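# A hedged illustration of the helpers above (not part of upstream
# localrepo.py; the function name is made up). All of the cache decorators
# route storage through repo.unfiltered(), so every filtered view of a
# repository shares a single cache:
def _examplecacherouting(repo):
    visible = repo.filtered('visible')
    # both views resolve to the same underlying object, so a value cached
    # through one is seen through the other
    assert visible.unfiltered() is repo.unfiltered()
    # hasunfilteredcache() probes for a cached value without creating it
    return hasunfilteredcache(repo, '_tagscache')
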
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2X' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make
            # the wire-level function happier. We need to build a proper
            # bundle2 object from it in the local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

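    def _examplegetbundle(self):
        # Hedged sketch, not upstream code: shows the two shapes getbundle()
        # can return, as handled above. A plain call yields a changegroup
        # unbundler; asking for 'HG2X' yields a bundle2.unbundle20 object.
        plain = self.getbundle('pull', heads=self.heads())
        bundle2ized = self.getbundle('pull', heads=self.heads(),
                                     bundlecaps=set(['HG2X']))
        return plain, bundle2ized
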
    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the
                # API is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

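# A hedged usage sketch (not upstream code; the function name is made up):
# the only difference between the two local peer flavours is the advertised
# capability set.
def _examplepeercaps(repo):
    modern = localpeer(repo)
    legacy = locallegacypeer(repo)
    assert legacy.capable('changegroupsubset')
    assert not modern.capable('changegroupsubset')
    return modern._capabilities(), legacy._capabilities()
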
class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    bundle2caps = {'HG2X': (),
                   'b2x:listkeys': ()}

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(self.bundle2caps)
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

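    def _examplebundle2caps(self):
        # Hedged sketch, not upstream code: with the experimental knob set,
        # the bundle2caps dict above (including the new 'b2x:listkeys'
        # entry) is encoded into a single quoted capability token.
        self.ui.setconfig('experimental', 'bundle2-exp', True)
        caps = self._restrictcapabilities(set())
        blob = [c for c in caps if c.startswith('bundle2-exp=')][0]
        return urllib.unquote(blob[len('bundle2-exp='):])
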
    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly a subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

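    def _examplefiltering(self):
        # Hedged sketch, not upstream code: a filtered view hides revisions
        # without copying anything; 'served' additionally hides secret
        # changesets, so it can be shorter than the unfiltered repo.
        served = self.filtered('served')
        assert served.unfiltered() is self.unfiltered()
        return len(self.unfiltered()), len(served)
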
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

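    def _examplecontainer(self):
        # Hedged sketch, not upstream code: the container protocol above
        # gives changectx access by rev, node, tag, or None (working dir).
        wctx = self[None]
        tip = self['tip']
        assert 'tip' in self and len(self) == len(self.changelog)
        return wctx, tip
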
    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

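    def _examplerevsets(self):
        # Hedged sketch, not upstream code: revs() returns revision numbers
        # while set() yields contexts; both escape their arguments through
        # revset.formatspec (%n: binary node, %ld: list of revs).
        drafts = self.revs('ancestors(%n) and draft()', self['tip'].node())
        return [ctx.hex() for ctx in self.set('limit(%ld, 3)', drafts)]
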
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may
        be a string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

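    def _exampletagging(self):
        # Hedged sketch, not upstream code (tag names and user made up):
        # the 'hg tag' command boils down to calls like these.
        node = self['tip'].node()
        # local tag: recorded in .hg/localtags, no commit is created
        self.tag('nightly', node, '', True, None, None)
        # global tag: writes .hgtags and commits the change
        self.tag('v1.0', node, 'Added tag v1.0', False,
                 'tagger <tagger@example.com>', None)
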
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global'
                # or 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

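    def _examplebranchmap(self):
        # Hedged sketch, not upstream code: branchmap() behaves like a
        # {branch: [heads]} mapping and also knows each branch's tip.
        bm = self.branchmap()
        for branch in bm:
            self.ui.write('%s -> %s\n'
                          % (branch, hex(bm.branchtip(branch))))
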
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

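    def _exampleknown(self):
        # Hedged sketch, not upstream code: known() is the discovery
        # primitive; secret changesets are reported as unknown so that a
        # pulling peer never requests them.
        present = self['tip'].node()
        bogus = 'x' * 20
        return self.known([present, bogus])  # typically [True, False]
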
    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records; the dirstate cannot do it, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

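    def _exampledatafilter(self):
        # Hedged sketch, not upstream code: registers a named data filter
        # that an [encode] or [decode] hgrc rule such as
        #   **.txt = myfilter:
        # would select via the startswith() match in _loadfilter above.
        def upper(s, cmd, ui=None, repo=None, filename=None):
            return s.upper()
        self.adddatafilter('myfilter:', upper)
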
    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        def onclose():
            self.store.write(tr)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        self._transref = weakref.ref(tr)
        return tr

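    def _exampletransaction(self):
        # Hedged sketch, not upstream code: the standard idiom for the
        # transaction() API above. Anything written between transaction()
        # and close() is rolled back if close() is never reached.
        lock = self.lock()
        try:
            tr = self.transaction('example')
            try:
                # ... write store data here ...
                tr.close()
            finally:
                tr.release()
        finally:
            lock.release()
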
877 def _journalfiles(self):
878 def _journalfiles(self):
878 return ((self.svfs, 'journal'),
879 return ((self.svfs, 'journal'),
879 (self.vfs, 'journal.dirstate'),
880 (self.vfs, 'journal.dirstate'),
880 (self.vfs, 'journal.branch'),
881 (self.vfs, 'journal.branch'),
881 (self.vfs, 'journal.desc'),
882 (self.vfs, 'journal.desc'),
882 (self.vfs, 'journal.bookmarks'),
883 (self.vfs, 'journal.bookmarks'),
883 (self.svfs, 'journal.phaseroots'))
884 (self.svfs, 'journal.phaseroots'))
884
885
885 def undofiles(self):
886 def undofiles(self):
886 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
887 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
887
888
888 def _writejournal(self, desc):
889 def _writejournal(self, desc):
889 self.opener.write("journal.dirstate",
890 self.opener.write("journal.dirstate",
890 self.opener.tryread("dirstate"))
891 self.opener.tryread("dirstate"))
891 self.opener.write("journal.branch",
892 self.opener.write("journal.branch",
892 encoding.fromlocal(self.dirstate.branch()))
893 encoding.fromlocal(self.dirstate.branch()))
893 self.opener.write("journal.desc",
894 self.opener.write("journal.desc",
894 "%d\n%s\n" % (len(self), desc))
895 "%d\n%s\n" % (len(self), desc))
895 self.opener.write("journal.bookmarks",
896 self.opener.write("journal.bookmarks",
896 self.opener.tryread("bookmarks"))
897 self.opener.tryread("bookmarks"))
897 self.sopener.write("journal.phaseroots",
898 self.sopener.write("journal.phaseroots",
898 self.sopener.tryread("phaseroots"))
899 self.sopener.tryread("phaseroots"))
899
900
900 def recover(self):
901 def recover(self):
901 lock = self.lock()
902 lock = self.lock()
902 try:
903 try:
903 if self.svfs.exists("journal"):
904 if self.svfs.exists("journal"):
904 self.ui.status(_("rolling back interrupted transaction\n"))
905 self.ui.status(_("rolling back interrupted transaction\n"))
905 transaction.rollback(self.sopener, "journal",
906 transaction.rollback(self.sopener, "journal",
906 self.ui.warn)
907 self.ui.warn)
907 self.invalidate()
908 self.invalidate()
908 return True
909 return True
909 else:
910 else:
910 self.ui.warn(_("no interrupted transaction available\n"))
911 self.ui.warn(_("no interrupted transaction available\n"))
911 return False
912 return False
912 finally:
913 finally:
913 lock.release()
914 lock.release()
914
915
915 def rollback(self, dryrun=False, force=False):
916 def rollback(self, dryrun=False, force=False):
916 wlock = lock = None
917 wlock = lock = None
917 try:
918 try:
918 wlock = self.wlock()
919 wlock = self.wlock()
919 lock = self.lock()
920 lock = self.lock()
920 if self.svfs.exists("undo"):
921 if self.svfs.exists("undo"):
921 return self._rollback(dryrun, force)
922 return self._rollback(dryrun, force)
922 else:
923 else:
923 self.ui.warn(_("no rollback information available\n"))
924 self.ui.warn(_("no rollback information available\n"))
924 return 1
925 return 1
925 finally:
926 finally:
926 release(lock, wlock)
927 release(lock, wlock)
927
928
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

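    # Illustrative sketch (not part of the original file): queueing work to
    # run once the store lock is released; if no lock is currently held the
    # callback fires immediately.
    #
    #   def notify():                      # hypothetical callback
    #       repo.ui.status('lock released\n')
    #   repo._afterlock(notify)
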
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

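    # Illustrative sketch (not part of the original file): callers acquire
    # wlock before lock, mirroring rollback() above, and release() unwinds
    # them in reverse order.
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       # ... mutate .hg and .hg/store here ...
    #   finally:
    #       release(lock, wlock)
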
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in changes[:3]:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

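    # Illustrative sketch (not part of the original file; the repo path and
    # file name are hypothetical): committing a subset of the working
    # directory through the API above.
    #
    #   from mercurial import ui as uimod, hg, scmutil
    #   repo = hg.repository(uimod.ui(), '/path/to/repo')
    #   m = scmutil.match(repo[None], ['foo.py'])
    #   node = repo.commit(text='fix foo', user='alice', match=m)
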
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

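    # Illustrative sketch (not part of the original file): both helpers
    # defer to context objects, e.g.
    #
    #   m = matchmod.always(repo.root, '')
    #   tracked = list(repo.walk(m, node='tip'))      # files in tip
    #   modified, added, removed = repo.status()[:3]  # working dir vs '.'
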
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

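    # Illustrative note (not part of the original file): for each (top,
    # bottom) pair, between() samples the first-parent chain at distances
    # 1, 2, 4, 8, ... from top, giving discovery a logarithmic set of probe
    # points instead of the full chain.
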
    def pull(self, remote, heads=None, force=False):
        return exchange.pull(self, remote, heads, force)

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def push(self, remote, force=False, revs=None, newbranch=False):
        return exchange.push(self, remote, force, revs, newbranch)

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filters above served are unlikely to be fetched from
                    # a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

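    # Illustrative sketch (not part of the original file; the bookmark name
    # and node are hypothetical): pushkey namespaces such as 'bookmarks' and
    # 'phases' are read and written through the two methods above.
    #
    #   marks = repo.listkeys('bookmarks')        # {name: hex node}
    #   repo.pushkey('bookmarks', 'stable',
    #                marks.get('stable', ''), hex(newnode))
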
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

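# Illustrative note (not part of the original file; the rename pairs are
# assumed): localrepo.transaction() passes the closure built here to
# transaction.transaction() as its post-close callback, so journal files
# are renamed to their undo.* names once the transaction commits.
#
#   renames = [(repo.svfs, 'journal', 'undo'),
#              (repo.vfs, 'journal.dirstate', 'undo.dirstate')]
#   after = aftertrans(renames)
#   after()  # performs the renames, ignoring missing journal files
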
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

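# Illustrative example (not part of the original file):
#
#   undoname('.hg/store/journal.phaseroots')  # -> '.hg/store/undo.phaseroots'
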
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,862 +1,863 b''
1 # wireproto.py - generic wire protocol support functions
1 # wireproto.py - generic wire protocol support functions
2 #
2 #
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import urllib, tempfile, os, sys
8 import urllib, tempfile, os, sys
9 from i18n import _
9 from i18n import _
10 from node import bin, hex
10 from node import bin, hex
11 import changegroup as changegroupmod, bundle2, pushkey as pushkeymod
11 import changegroup as changegroupmod, bundle2, pushkey as pushkeymod
12 import peer, error, encoding, util, store, exchange
12 import peer, error, encoding, util, store, exchange
13
13
14
14
15 class abstractserverproto(object):
15 class abstractserverproto(object):
16 """abstract class that summarizes the protocol API
16 """abstract class that summarizes the protocol API
17
17
18 Used as reference and documentation.
18 Used as reference and documentation.
19 """
19 """
20
20
21 def getargs(self, args):
21 def getargs(self, args):
22 """return the value for arguments in <args>
22 """return the value for arguments in <args>
23
23
24 returns a list of values (same order as <args>)"""
24 returns a list of values (same order as <args>)"""
25 raise NotImplementedError()
25 raise NotImplementedError()
26
26
27 def getfile(self, fp):
27 def getfile(self, fp):
28 """write the whole content of a file into a file like object
28 """write the whole content of a file into a file like object
29
29
30 The file is in the form::
30 The file is in the form::
31
31
32 (<chunk-size>\n<chunk>)+0\n
32 (<chunk-size>\n<chunk>)+0\n
33
33
34 chunk size is the ascii version of the int.
34 chunk size is the ascii version of the int.
35 """
35 """
36 raise NotImplementedError()
36 raise NotImplementedError()
37
37
38 def redirect(self):
38 def redirect(self):
39 """may setup interception for stdout and stderr
39 """may setup interception for stdout and stderr
40
40
41 See also the `restore` method."""
41 See also the `restore` method."""
42 raise NotImplementedError()
42 raise NotImplementedError()
43
43
44 # If the `redirect` function does install interception, the `restore`
44 # If the `redirect` function does install interception, the `restore`
45 # function MUST be defined. If interception is not used, this function
45 # function MUST be defined. If interception is not used, this function
46 # MUST NOT be defined.
46 # MUST NOT be defined.
47 #
47 #
48 # left commented here on purpose
48 # left commented here on purpose
49 #
49 #
50 #def restore(self):
50 #def restore(self):
51 # """reinstall previous stdout and stderr and return intercepted stdout
51 # """reinstall previous stdout and stderr and return intercepted stdout
52 # """
52 # """
53 # raise NotImplementedError()
53 # raise NotImplementedError()
54
54
55 def groupchunks(self, cg):
55 def groupchunks(self, cg):
56 """return 4096 chunks from a changegroup object
56 """return 4096 chunks from a changegroup object
57
57
58 Some protocols may have compressed the contents."""
58 Some protocols may have compressed the contents."""
59 raise NotImplementedError()
59 raise NotImplementedError()
60
60
61 # abstract batching support
61 # abstract batching support
62
62
63 class future(object):
63 class future(object):
64 '''placeholder for a value to be set later'''
64 '''placeholder for a value to be set later'''
65 def set(self, value):
65 def set(self, value):
66 if util.safehasattr(self, 'value'):
66 if util.safehasattr(self, 'value'):
67 raise error.RepoError("future is already set")
67 raise error.RepoError("future is already set")
68 self.value = value
68 self.value = value
69
69
70 class batcher(object):
70 class batcher(object):
71 '''base class for batches of commands submittable in a single request
71 '''base class for batches of commands submittable in a single request
72
72
73 All methods invoked on instances of this class are simply queued and
73 All methods invoked on instances of this class are simply queued and
74 return a future for the result. Once you call submit(), all the queued
74 return a future for the result. Once you call submit(), all the queued
75 calls are performed and the results set in their respective futures.
75 calls are performed and the results set in their respective futures.
76 '''
76 '''
77 def __init__(self):
77 def __init__(self):
78 self.calls = []
78 self.calls = []
79 def __getattr__(self, name):
79 def __getattr__(self, name):
80 def call(*args, **opts):
80 def call(*args, **opts):
81 resref = future()
81 resref = future()
82 self.calls.append((name, args, opts, resref,))
82 self.calls.append((name, args, opts, resref,))
83 return resref
83 return resref
84 return call
84 return call
85 def submit(self):
85 def submit(self):
86 pass
86 pass
87
87
88 class localbatch(batcher):
88 class localbatch(batcher):
89 '''performs the queued calls directly'''
89 '''performs the queued calls directly'''
90 def __init__(self, local):
90 def __init__(self, local):
91 batcher.__init__(self)
91 batcher.__init__(self)
92 self.local = local
92 self.local = local
93 def submit(self):
93 def submit(self):
94 for name, args, opts, resref in self.calls:
94 for name, args, opts, resref in self.calls:
95 resref.set(getattr(self.local, name)(*args, **opts))
95 resref.set(getattr(self.local, name)(*args, **opts))
96
96
97 class remotebatch(batcher):
97 class remotebatch(batcher):
98 '''batches the queued calls; uses as few roundtrips as possible'''
98 '''batches the queued calls; uses as few roundtrips as possible'''
99 def __init__(self, remote):
99 def __init__(self, remote):
100 '''remote must support _submitbatch(encbatch) and
100 '''remote must support _submitbatch(encbatch) and
101 _submitone(op, encargs)'''
101 _submitone(op, encargs)'''
102 batcher.__init__(self)
102 batcher.__init__(self)
103 self.remote = remote
103 self.remote = remote
104 def submit(self):
104 def submit(self):
105 req, rsp = [], []
105 req, rsp = [], []
106 for name, args, opts, resref in self.calls:
106 for name, args, opts, resref in self.calls:
107 mtd = getattr(self.remote, name)
107 mtd = getattr(self.remote, name)
108 batchablefn = getattr(mtd, 'batchable', None)
108 batchablefn = getattr(mtd, 'batchable', None)
109 if batchablefn is not None:
109 if batchablefn is not None:
110 batchable = batchablefn(mtd.im_self, *args, **opts)
110 batchable = batchablefn(mtd.im_self, *args, **opts)
111 encargsorres, encresref = batchable.next()
111 encargsorres, encresref = batchable.next()
112 if encresref:
112 if encresref:
113 req.append((name, encargsorres,))
113 req.append((name, encargsorres,))
114 rsp.append((batchable, encresref, resref,))
114 rsp.append((batchable, encresref, resref,))
115 else:
115 else:
116 resref.set(encargsorres)
116 resref.set(encargsorres)
117 else:
117 else:
118 if req:
118 if req:
119 self._submitreq(req, rsp)
119 self._submitreq(req, rsp)
120 req, rsp = [], []
120 req, rsp = [], []
121 resref.set(mtd(*args, **opts))
121 resref.set(mtd(*args, **opts))
122 if req:
122 if req:
123 self._submitreq(req, rsp)
123 self._submitreq(req, rsp)
124 def _submitreq(self, req, rsp):
124 def _submitreq(self, req, rsp):
125 encresults = self.remote._submitbatch(req)
125 encresults = self.remote._submitbatch(req)
126 for encres, r in zip(encresults, rsp):
126 for encres, r in zip(encresults, rsp):
127 batchable, encresref, resref = r
127 batchable, encresref, resref = r
128 encresref.set(encres)
128 encresref.set(encres)
129 resref.set(batchable.next())
129 resref.set(batchable.next())
130
130
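A hedged usage sketch of the batching machinery above (`remote` stands for any wirepeer and `nodes` is a placeholder list of binary nodes): calls are merely queued, a single round-trip happens on submit(), and decoded results land in the futures:

    b = remote.batch()        # a remotebatch instance
    fheads = b.heads()        # queued; returns a future
    fknown = b.known(nodes)   # queued as well
    b.submit()                # one wire request covers both commands
    heads = fheads.value      # results are now available
    known = fknown.value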
131 def batchable(f):
131 def batchable(f):
132 '''annotation for batchable methods
132 '''annotation for batchable methods
133
133
134 Such methods must implement a coroutine as follows:
134 Such methods must implement a coroutine as follows:
135
135
136 @batchable
136 @batchable
137 def sample(self, one, two=None):
137 def sample(self, one, two=None):
138 # Handle locally computable results first:
138 # Handle locally computable results first:
139 if not one:
139 if not one:
140 yield "a local result", None
140 yield "a local result", None
141 # Build list of encoded arguments suitable for your wire protocol:
141 # Build list of encoded arguments suitable for your wire protocol:
142 encargs = [('one', encode(one),), ('two', encode(two),)]
142 encargs = [('one', encode(one),), ('two', encode(two),)]
143 # Create future for injection of encoded result:
143 # Create future for injection of encoded result:
144 encresref = future()
144 encresref = future()
145 # Return encoded arguments and future:
145 # Return encoded arguments and future:
146 yield encargs, encresref
146 yield encargs, encresref
147 # Assuming the future to be filled with the result from the batched
147 # Assuming the future to be filled with the result from the batched
148 # request now. Decode it:
148 # request now. Decode it:
149 yield decode(encresref.value)
149 yield decode(encresref.value)
150
150
151 The decorator returns a function which wraps this coroutine as a plain
151 The decorator returns a function which wraps this coroutine as a plain
152 method, but adds the original method as an attribute called "batchable",
152 method, but adds the original method as an attribute called "batchable",
153 which is used by remotebatch to split the call into separate encoding and
153 which is used by remotebatch to split the call into separate encoding and
154 decoding phases.
154 decoding phases.
155 '''
155 '''
156 def plain(*args, **opts):
156 def plain(*args, **opts):
157 batchable = f(*args, **opts)
157 batchable = f(*args, **opts)
158 encargsorres, encresref = batchable.next()
158 encargsorres, encresref = batchable.next()
159 if not encresref:
159 if not encresref:
160 return encargsorres # a local result in this case
160 return encargsorres # a local result in this case
161 self = args[0]
161 self = args[0]
162 encresref.set(self._submitone(f.func_name, encargsorres))
162 encresref.set(self._submitone(f.func_name, encargsorres))
163 return batchable.next()
163 return batchable.next()
164 setattr(plain, 'batchable', f)
164 setattr(plain, 'batchable', f)
165 return plain
165 return plain
166
166
167 # list of nodes encoding / decoding
167 # list of nodes encoding / decoding
168
168
169 def decodelist(l, sep=' '):
169 def decodelist(l, sep=' '):
170 if l:
170 if l:
171 return map(bin, l.split(sep))
171 return map(bin, l.split(sep))
172 return []
172 return []
173
173
174 def encodelist(l, sep=' '):
174 def encodelist(l, sep=' '):
175 return sep.join(map(hex, l))
175 return sep.join(map(hex, l))
176
176
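For example (a sketch; `hex` and `bin` come from the `node` module imported at the top of the file):

    nodes = ['\x00' * 20, '\xff' * 20]  # two 20-byte binary nodes
    wire = encodelist(nodes)            # '000...000 fff...fff'
    assert decodelist(wire) == nodes    # round-trips losslessly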
177 # batched call argument encoding
177 # batched call argument encoding
178
178
179 def escapearg(plain):
179 def escapearg(plain):
180 return (plain
180 return (plain
181 .replace(':', '::')
181 .replace(':', '::')
182 .replace(',', ':,')
182 .replace(',', ':,')
183 .replace(';', ':;')
183 .replace(';', ':;')
184 .replace('=', ':='))
184 .replace('=', ':='))
185
185
186 def unescapearg(escaped):
186 def unescapearg(escaped):
187 return (escaped
187 return (escaped
188 .replace(':=', '=')
188 .replace(':=', '=')
189 .replace(':;', ';')
189 .replace(':;', ';')
190 .replace(':,', ',')
190 .replace(':,', ',')
191 .replace('::', ':'))
191 .replace('::', ':'))
192
192
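A quick illustration of the escaping: ':' doubles itself so that ',' , ';' and '=' can be reused as batch separators, and unescaping reverses the substitutions in the opposite order:

    assert escapearg('key=value, more;stuff') == 'key:=value:, more:;stuff'
    assert unescapearg(escapearg('a:b=c')) == 'a:b=c'  # reverses cleanly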
193 # mapping of options accepted by getbundle and their types
193 # mapping of options accepted by getbundle and their types
194 #
194 #
195 # Meant to be extended by extensions. It is the extension's responsibility to
195 # Meant to be extended by extensions. It is the extension's responsibility to
196 # ensure such options are properly processed in exchange.getbundle.
196 # ensure such options are properly processed in exchange.getbundle.
197 #
197 #
198 # supported types are:
198 # supported types are:
199 #
199 #
200 # :nodes: list of binary nodes
200 # :nodes: list of binary nodes
201 # :csv: list of comma-separated values
201 # :csv: list of comma-separated values
202 # :plain: string with no transformation needed.
202 # :plain: string with no transformation needed.
203 gboptsmap = {'heads': 'nodes',
203 gboptsmap = {'heads': 'nodes',
204 'common': 'nodes',
204 'common': 'nodes',
205 'bundlecaps': 'csv'}
205 'bundlecaps': 'csv',
206 'listkeys': 'csv'}
206
207
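The new 'listkeys' entry is the heart of this changeset: a client may now request pushkey namespaces in the same getbundle round-trip. As a 'csv' option, the value travels comma-joined. A hedged sketch (the namespace names are examples):

    # client side: the value is a list of namespaces ...
    kwargs = {'listkeys': ['phases', 'bookmarks']}
    # ... which getbundle() below encodes as the wire argument
    #     listkeys=phases,bookmarks
    # the server decodes it back into a set (see the server-side getbundle)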
207 # client side
208 # client side
208
209
209 class wirepeer(peer.peerrepository):
210 class wirepeer(peer.peerrepository):
210
211
211 def batch(self):
212 def batch(self):
212 return remotebatch(self)
213 return remotebatch(self)
213 def _submitbatch(self, req):
214 def _submitbatch(self, req):
214 cmds = []
215 cmds = []
215 for op, argsdict in req:
216 for op, argsdict in req:
216 args = ','.join('%s=%s' % p for p in argsdict.iteritems())
217 args = ','.join('%s=%s' % p for p in argsdict.iteritems())
217 cmds.append('%s %s' % (op, args))
218 cmds.append('%s %s' % (op, args))
218 rsp = self._call("batch", cmds=';'.join(cmds))
219 rsp = self._call("batch", cmds=';'.join(cmds))
219 return rsp.split(';')
220 return rsp.split(';')
220 def _submitone(self, op, args):
221 def _submitone(self, op, args):
221 return self._call(op, **args)
222 return self._call(op, **args)
222
223
223 @batchable
224 @batchable
224 def lookup(self, key):
225 def lookup(self, key):
225 self.requirecap('lookup', _('look up remote revision'))
226 self.requirecap('lookup', _('look up remote revision'))
226 f = future()
227 f = future()
227 yield {'key': encoding.fromlocal(key)}, f
228 yield {'key': encoding.fromlocal(key)}, f
228 d = f.value
229 d = f.value
229 success, data = d[:-1].split(" ", 1)
230 success, data = d[:-1].split(" ", 1)
230 if int(success):
231 if int(success):
231 yield bin(data)
232 yield bin(data)
232 self._abort(error.RepoError(data))
233 self._abort(error.RepoError(data))
233
234
234 @batchable
235 @batchable
235 def heads(self):
236 def heads(self):
236 f = future()
237 f = future()
237 yield {}, f
238 yield {}, f
238 d = f.value
239 d = f.value
239 try:
240 try:
240 yield decodelist(d[:-1])
241 yield decodelist(d[:-1])
241 except ValueError:
242 except ValueError:
242 self._abort(error.ResponseError(_("unexpected response:"), d))
243 self._abort(error.ResponseError(_("unexpected response:"), d))
243
244
244 @batchable
245 @batchable
245 def known(self, nodes):
246 def known(self, nodes):
246 f = future()
247 f = future()
247 yield {'nodes': encodelist(nodes)}, f
248 yield {'nodes': encodelist(nodes)}, f
248 d = f.value
249 d = f.value
249 try:
250 try:
250 yield [bool(int(f)) for f in d]
251 yield [bool(int(f)) for f in d]
251 except ValueError:
252 except ValueError:
252 self._abort(error.ResponseError(_("unexpected response:"), d))
253 self._abort(error.ResponseError(_("unexpected response:"), d))
253
254
254 @batchable
255 @batchable
255 def branchmap(self):
256 def branchmap(self):
256 f = future()
257 f = future()
257 yield {}, f
258 yield {}, f
258 d = f.value
259 d = f.value
259 try:
260 try:
260 branchmap = {}
261 branchmap = {}
261 for branchpart in d.splitlines():
262 for branchpart in d.splitlines():
262 branchname, branchheads = branchpart.split(' ', 1)
263 branchname, branchheads = branchpart.split(' ', 1)
263 branchname = encoding.tolocal(urllib.unquote(branchname))
264 branchname = encoding.tolocal(urllib.unquote(branchname))
264 branchheads = decodelist(branchheads)
265 branchheads = decodelist(branchheads)
265 branchmap[branchname] = branchheads
266 branchmap[branchname] = branchheads
266 yield branchmap
267 yield branchmap
267 except TypeError:
268 except TypeError:
268 self._abort(error.ResponseError(_("unexpected response:"), d))
269 self._abort(error.ResponseError(_("unexpected response:"), d))
269
270
270 def branches(self, nodes):
271 def branches(self, nodes):
271 n = encodelist(nodes)
272 n = encodelist(nodes)
272 d = self._call("branches", nodes=n)
273 d = self._call("branches", nodes=n)
273 try:
274 try:
274 br = [tuple(decodelist(b)) for b in d.splitlines()]
275 br = [tuple(decodelist(b)) for b in d.splitlines()]
275 return br
276 return br
276 except ValueError:
277 except ValueError:
277 self._abort(error.ResponseError(_("unexpected response:"), d))
278 self._abort(error.ResponseError(_("unexpected response:"), d))
278
279
279 def between(self, pairs):
280 def between(self, pairs):
280 batch = 8 # avoid giant requests
281 batch = 8 # avoid giant requests
281 r = []
282 r = []
282 for i in xrange(0, len(pairs), batch):
283 for i in xrange(0, len(pairs), batch):
283 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
284 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
284 d = self._call("between", pairs=n)
285 d = self._call("between", pairs=n)
285 try:
286 try:
286 r.extend(l and decodelist(l) or [] for l in d.splitlines())
287 r.extend(l and decodelist(l) or [] for l in d.splitlines())
287 except ValueError:
288 except ValueError:
288 self._abort(error.ResponseError(_("unexpected response:"), d))
289 self._abort(error.ResponseError(_("unexpected response:"), d))
289 return r
290 return r
290
291
291 @batchable
292 @batchable
292 def pushkey(self, namespace, key, old, new):
293 def pushkey(self, namespace, key, old, new):
293 if not self.capable('pushkey'):
294 if not self.capable('pushkey'):
294 yield False, None
295 yield False, None
295 f = future()
296 f = future()
296 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
297 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
297 yield {'namespace': encoding.fromlocal(namespace),
298 yield {'namespace': encoding.fromlocal(namespace),
298 'key': encoding.fromlocal(key),
299 'key': encoding.fromlocal(key),
299 'old': encoding.fromlocal(old),
300 'old': encoding.fromlocal(old),
300 'new': encoding.fromlocal(new)}, f
301 'new': encoding.fromlocal(new)}, f
301 d = f.value
302 d = f.value
302 d, output = d.split('\n', 1)
303 d, output = d.split('\n', 1)
303 try:
304 try:
304 d = bool(int(d))
305 d = bool(int(d))
305 except ValueError:
306 except ValueError:
306 raise error.ResponseError(
307 raise error.ResponseError(
307 _('push failed (unexpected response):'), d)
308 _('push failed (unexpected response):'), d)
308 for l in output.splitlines(True):
309 for l in output.splitlines(True):
309 self.ui.status(_('remote: '), l)
310 self.ui.status(_('remote: '), l)
310 yield d
311 yield d
311
312
312 @batchable
313 @batchable
313 def listkeys(self, namespace):
314 def listkeys(self, namespace):
314 if not self.capable('pushkey'):
315 if not self.capable('pushkey'):
315 yield {}, None
316 yield {}, None
316 f = future()
317 f = future()
317 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
318 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
318 yield {'namespace': encoding.fromlocal(namespace)}, f
319 yield {'namespace': encoding.fromlocal(namespace)}, f
319 d = f.value
320 d = f.value
320 yield pushkeymod.decodekeys(d)
321 yield pushkeymod.decodekeys(d)
321
322
322 def stream_out(self):
323 def stream_out(self):
323 return self._callstream('stream_out')
324 return self._callstream('stream_out')
324
325
325 def changegroup(self, nodes, kind):
326 def changegroup(self, nodes, kind):
326 n = encodelist(nodes)
327 n = encodelist(nodes)
327 f = self._callcompressable("changegroup", roots=n)
328 f = self._callcompressable("changegroup", roots=n)
328 return changegroupmod.unbundle10(f, 'UN')
329 return changegroupmod.unbundle10(f, 'UN')
329
330
330 def changegroupsubset(self, bases, heads, kind):
331 def changegroupsubset(self, bases, heads, kind):
331 self.requirecap('changegroupsubset', _('look up remote changes'))
332 self.requirecap('changegroupsubset', _('look up remote changes'))
332 bases = encodelist(bases)
333 bases = encodelist(bases)
333 heads = encodelist(heads)
334 heads = encodelist(heads)
334 f = self._callcompressable("changegroupsubset",
335 f = self._callcompressable("changegroupsubset",
335 bases=bases, heads=heads)
336 bases=bases, heads=heads)
336 return changegroupmod.unbundle10(f, 'UN')
337 return changegroupmod.unbundle10(f, 'UN')
337
338
338 def getbundle(self, source, **kwargs):
339 def getbundle(self, source, **kwargs):
339 self.requirecap('getbundle', _('look up remote changes'))
340 self.requirecap('getbundle', _('look up remote changes'))
340 opts = {}
341 opts = {}
341 for key, value in kwargs.iteritems():
342 for key, value in kwargs.iteritems():
342 if value is None:
343 if value is None:
343 continue
344 continue
344 keytype = gboptsmap.get(key)
345 keytype = gboptsmap.get(key)
345 if keytype is None:
346 if keytype is None:
346 assert False, 'unexpected'
347 assert False, 'unexpected'
347 elif keytype == 'nodes':
348 elif keytype == 'nodes':
348 value = encodelist(value)
349 value = encodelist(value)
349 elif keytype == 'csv':
350 elif keytype == 'csv':
350 value = ','.join(value)
351 value = ','.join(value)
351 elif keytype != 'plain':
352 elif keytype != 'plain':
352 raise KeyError('unknown getbundle option type %s'
353 raise KeyError('unknown getbundle option type %s'
353 % keytype)
354 % keytype)
354 opts[key] = value
355 opts[key] = value
355 f = self._callcompressable("getbundle", **opts)
356 f = self._callcompressable("getbundle", **opts)
356 bundlecaps = kwargs.get('bundlecaps')
357 bundlecaps = kwargs.get('bundlecaps')
357 if bundlecaps is not None and 'HG2X' in bundlecaps:
358 if bundlecaps is not None and 'HG2X' in bundlecaps:
358 return bundle2.unbundle20(self.ui, f)
359 return bundle2.unbundle20(self.ui, f)
359 else:
360 else:
360 return changegroupmod.unbundle10(f, 'UN')
361 return changegroupmod.unbundle10(f, 'UN')
361
362
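A hedged example of driving the method above (the variables are placeholders): requesting 'HG2X' in bundlecaps is what flips the reply to a bundle2 stream, and 'listkeys' rides along thanks to this changeset:

    reply = remote.getbundle('pull',
                             common=commonnodes,        # a 'nodes' option
                             bundlecaps=set(['HG2X']),  # ask for bundle2
                             listkeys=['phases'])       # new 'csv' option
    # with 'HG2X' present, `reply` is a bundle2.unbundle20 stream;
    # otherwise it is a plain changegroup (unbundle10)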
362 def unbundle(self, cg, heads, source):
363 def unbundle(self, cg, heads, source):
363 '''Send cg (a readable file-like object representing the
364 '''Send cg (a readable file-like object representing the
364 changegroup to push, typically a chunkbuffer object) to the
365 changegroup to push, typically a chunkbuffer object) to the
365 remote server as a bundle.
366 remote server as a bundle.
366
367
367 When pushing a bundle10 stream, return an integer indicating the
368 When pushing a bundle10 stream, return an integer indicating the
368 result of the push (see localrepository.addchangegroup()).
369 result of the push (see localrepository.addchangegroup()).
369
370
370 When pushing a bundle20 stream, return a bundle20 stream.'''
371 When pushing a bundle20 stream, return a bundle20 stream.'''
371
372
372 if heads != ['force'] and self.capable('unbundlehash'):
373 if heads != ['force'] and self.capable('unbundlehash'):
373 heads = encodelist(['hashed',
374 heads = encodelist(['hashed',
374 util.sha1(''.join(sorted(heads))).digest()])
375 util.sha1(''.join(sorted(heads))).digest()])
375 else:
376 else:
376 heads = encodelist(heads)
377 heads = encodelist(heads)
377
378
378 if util.safehasattr(cg, 'deltaheader'):
379 if util.safehasattr(cg, 'deltaheader'):
379 # this is a bundle10, do the old style call sequence
380 # this is a bundle10, do the old style call sequence
380 ret, output = self._callpush("unbundle", cg, heads=heads)
381 ret, output = self._callpush("unbundle", cg, heads=heads)
381 if ret == "":
382 if ret == "":
382 raise error.ResponseError(
383 raise error.ResponseError(
383 _('push failed:'), output)
384 _('push failed:'), output)
384 try:
385 try:
385 ret = int(ret)
386 ret = int(ret)
386 except ValueError:
387 except ValueError:
387 raise error.ResponseError(
388 raise error.ResponseError(
388 _('push failed (unexpected response):'), ret)
389 _('push failed (unexpected response):'), ret)
389
390
390 for l in output.splitlines(True):
391 for l in output.splitlines(True):
391 self.ui.status(_('remote: '), l)
392 self.ui.status(_('remote: '), l)
392 else:
393 else:
393 # bundle2 push. Send a stream, fetch a stream.
394 # bundle2 push. Send a stream, fetch a stream.
394 stream = self._calltwowaystream('unbundle', cg, heads=heads)
395 stream = self._calltwowaystream('unbundle', cg, heads=heads)
395 ret = bundle2.unbundle20(self.ui, stream)
396 ret = bundle2.unbundle20(self.ui, stream)
396 return ret
397 return ret
397
398
398 def debugwireargs(self, one, two, three=None, four=None, five=None):
399 def debugwireargs(self, one, two, three=None, four=None, five=None):
399 # don't pass optional arguments left at their default value
400 # don't pass optional arguments left at their default value
400 opts = {}
401 opts = {}
401 if three is not None:
402 if three is not None:
402 opts['three'] = three
403 opts['three'] = three
403 if four is not None:
404 if four is not None:
404 opts['four'] = four
405 opts['four'] = four
405 return self._call('debugwireargs', one=one, two=two, **opts)
406 return self._call('debugwireargs', one=one, two=two, **opts)
406
407
407 def _call(self, cmd, **args):
408 def _call(self, cmd, **args):
408 """execute <cmd> on the server
409 """execute <cmd> on the server
409
410
410 The command is expected to return a simple string.
411 The command is expected to return a simple string.
411
412
412 returns the server reply as a string."""
413 returns the server reply as a string."""
413 raise NotImplementedError()
414 raise NotImplementedError()
414
415
415 def _callstream(self, cmd, **args):
416 def _callstream(self, cmd, **args):
416 """execute <cmd> on the server
417 """execute <cmd> on the server
417
418
418 The command is expected to return a stream.
419 The command is expected to return a stream.
419
420
420 returns the server reply as a file like object."""
421 returns the server reply as a file like object."""
421 raise NotImplementedError()
422 raise NotImplementedError()
422
423
423 def _callcompressable(self, cmd, **args):
424 def _callcompressable(self, cmd, **args):
424 """execute <cmd> on the server
425 """execute <cmd> on the server
425
426
426 The command is expected to return a stream.
427 The command is expected to return a stream.
427
428
428 The stream may have been compressed in some implementations. This
429 The stream may have been compressed in some implementations. This
429 function takes care of the decompression. This is the only difference
430 function takes care of the decompression. This is the only difference
430 with _callstream.
431 with _callstream.
431
432
432 returns the server reply as a file like object.
433 returns the server reply as a file like object.
433 """
434 """
434 raise NotImplementedError()
435 raise NotImplementedError()
435
436
436 def _callpush(self, cmd, fp, **args):
437 def _callpush(self, cmd, fp, **args):
437 """execute a <cmd> on server
438 """execute a <cmd> on server
438
439
439 The command is expected to be related to a push. Push has a special
440 The command is expected to be related to a push. Push has a special
440 return method.
441 return method.
441
442
442 returns the server reply as a (ret, output) tuple. ret is either
443 returns the server reply as a (ret, output) tuple. ret is either
443 empty (error) or a stringified int.
444 empty (error) or a stringified int.
444 """
445 """
445 raise NotImplementedError()
446 raise NotImplementedError()
446
447
447 def _calltwowaystream(self, cmd, fp, **args):
448 def _calltwowaystream(self, cmd, fp, **args):
448 """execute <cmd> on server
449 """execute <cmd> on server
449
450
450 The command will send a stream to the server and get a stream in reply.
451 The command will send a stream to the server and get a stream in reply.
451 """
452 """
452 raise NotImplementedError()
453 raise NotImplementedError()
453
454
454 def _abort(self, exception):
455 def _abort(self, exception):
455 """clearly abort the wire protocol connection and raise the exception
456 """clearly abort the wire protocol connection and raise the exception
456 """
457 """
457 raise NotImplementedError()
458 raise NotImplementedError()
458
459
459 # server side
460 # server side
460
461
461 # wire protocol command can either return a string or one of these classes.
462 # wire protocol command can either return a string or one of these classes.
462 class streamres(object):
463 class streamres(object):
463 """wireproto reply: binary stream
464 """wireproto reply: binary stream
464
465
465 The call was successful and the result is a stream.
466 The call was successful and the result is a stream.
466 Iterate on the `self.gen` attribute to retrieve chunks.
467 Iterate on the `self.gen` attribute to retrieve chunks.
467 """
468 """
468 def __init__(self, gen):
469 def __init__(self, gen):
469 self.gen = gen
470 self.gen = gen
470
471
471 class pushres(object):
472 class pushres(object):
472 """wireproto reply: success with simple integer return
473 """wireproto reply: success with simple integer return
473
474
474 The call was successful and returned an integer contained in `self.res`.
475 The call was successful and returned an integer contained in `self.res`.
475 """
476 """
476 def __init__(self, res):
477 def __init__(self, res):
477 self.res = res
478 self.res = res
478
479
479 class pusherr(object):
480 class pusherr(object):
480 """wireproto reply: failure
481 """wireproto reply: failure
481
482
482 The call failed. The `self.res` attribute contains the error message.
483 The call failed. The `self.res` attribute contains the error message.
483 """
484 """
484 def __init__(self, res):
485 def __init__(self, res):
485 self.res = res
486 self.res = res
486
487
487 class ooberror(object):
488 class ooberror(object):
488 """wireproto reply: failure of a batch of operation
489 """wireproto reply: failure of a batch of operation
489
490
490 Something failed during a batch call. The error message is stored in
491 Something failed during a batch call. The error message is stored in
491 `self.message`.
492 `self.message`.
492 """
493 """
493 def __init__(self, message):
494 def __init__(self, message):
494 self.message = message
495 self.message = message
495
496
496 def dispatch(repo, proto, command):
497 def dispatch(repo, proto, command):
497 repo = repo.filtered("served")
498 repo = repo.filtered("served")
498 func, spec = commands[command]
499 func, spec = commands[command]
499 args = proto.getargs(spec)
500 args = proto.getargs(spec)
500 return func(repo, proto, *args)
501 return func(repo, proto, *args)
501
502
502 def options(cmd, keys, others):
503 def options(cmd, keys, others):
503 opts = {}
504 opts = {}
504 for k in keys:
505 for k in keys:
505 if k in others:
506 if k in others:
506 opts[k] = others[k]
507 opts[k] = others[k]
507 del others[k]
508 del others[k]
508 if others:
509 if others:
509 sys.stderr.write("abort: %s got unexpected arguments %s\n"
510 sys.stderr.write("abort: %s got unexpected arguments %s\n"
510 % (cmd, ",".join(others)))
511 % (cmd, ",".join(others)))
511 return opts
512 return opts
512
513
513 # list of commands
514 # list of commands
514 commands = {}
515 commands = {}
515
516
516 def wireprotocommand(name, args=''):
517 def wireprotocommand(name, args=''):
517 """decorator for wire protocol command"""
518 """decorator for wire protocol command"""
518 def register(func):
519 def register(func):
519 commands[name] = (func, args)
520 commands[name] = (func, args)
520 return func
521 return func
521 return register
522 return register
522
523
523 @wireprotocommand('batch', 'cmds *')
524 @wireprotocommand('batch', 'cmds *')
524 def batch(repo, proto, cmds, others):
525 def batch(repo, proto, cmds, others):
525 repo = repo.filtered("served")
526 repo = repo.filtered("served")
526 res = []
527 res = []
527 for pair in cmds.split(';'):
528 for pair in cmds.split(';'):
528 op, args = pair.split(' ', 1)
529 op, args = pair.split(' ', 1)
529 vals = {}
530 vals = {}
530 for a in args.split(','):
531 for a in args.split(','):
531 if a:
532 if a:
532 n, v = a.split('=')
533 n, v = a.split('=')
533 vals[n] = unescapearg(v)
534 vals[n] = unescapearg(v)
534 func, spec = commands[op]
535 func, spec = commands[op]
535 if spec:
536 if spec:
536 keys = spec.split()
537 keys = spec.split()
537 data = {}
538 data = {}
538 for k in keys:
539 for k in keys:
539 if k == '*':
540 if k == '*':
540 star = {}
541 star = {}
541 for key in vals.keys():
542 for key in vals.keys():
542 if key not in keys:
543 if key not in keys:
543 star[key] = vals[key]
544 star[key] = vals[key]
544 data['*'] = star
545 data['*'] = star
545 else:
546 else:
546 data[k] = vals[k]
547 data[k] = vals[k]
547 result = func(repo, proto, *[data[k] for k in keys])
548 result = func(repo, proto, *[data[k] for k in keys])
548 else:
549 else:
549 result = func(repo, proto)
550 result = func(repo, proto)
550 if isinstance(result, ooberror):
551 if isinstance(result, ooberror):
551 return result
552 return result
552 res.append(escapearg(result))
553 res.append(escapearg(result))
553 return ';'.join(res)
554 return ';'.join(res)
554
555
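To make the parsing above concrete, a hypothetical batch payload (the command names exist above; the argument value is an example):

    cmds = 'heads ;lookup key=tip'
    # splits into ('heads', '') and ('lookup', 'key=tip'); each result is
    # escaped with escapearg() and the replies come back joined with ';'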
555 @wireprotocommand('between', 'pairs')
556 @wireprotocommand('between', 'pairs')
556 def between(repo, proto, pairs):
557 def between(repo, proto, pairs):
557 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
558 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
558 r = []
559 r = []
559 for b in repo.between(pairs):
560 for b in repo.between(pairs):
560 r.append(encodelist(b) + "\n")
561 r.append(encodelist(b) + "\n")
561 return "".join(r)
562 return "".join(r)
562
563
563 @wireprotocommand('branchmap')
564 @wireprotocommand('branchmap')
564 def branchmap(repo, proto):
565 def branchmap(repo, proto):
565 branchmap = repo.branchmap()
566 branchmap = repo.branchmap()
566 heads = []
567 heads = []
567 for branch, nodes in branchmap.iteritems():
568 for branch, nodes in branchmap.iteritems():
568 branchname = urllib.quote(encoding.fromlocal(branch))
569 branchname = urllib.quote(encoding.fromlocal(branch))
569 branchnodes = encodelist(nodes)
570 branchnodes = encodelist(nodes)
570 heads.append('%s %s' % (branchname, branchnodes))
571 heads.append('%s %s' % (branchname, branchnodes))
571 return '\n'.join(heads)
572 return '\n'.join(heads)
572
573
573 @wireprotocommand('branches', 'nodes')
574 @wireprotocommand('branches', 'nodes')
574 def branches(repo, proto, nodes):
575 def branches(repo, proto, nodes):
575 nodes = decodelist(nodes)
576 nodes = decodelist(nodes)
576 r = []
577 r = []
577 for b in repo.branches(nodes):
578 for b in repo.branches(nodes):
578 r.append(encodelist(b) + "\n")
579 r.append(encodelist(b) + "\n")
579 return "".join(r)
580 return "".join(r)
580
581
581
582
582 wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
583 wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
583 'known', 'getbundle', 'unbundlehash', 'batch']
584 'known', 'getbundle', 'unbundlehash', 'batch']
584
585
585 def _capabilities(repo, proto):
586 def _capabilities(repo, proto):
586 """return a list of capabilities for a repo
587 """return a list of capabilities for a repo
587
588
588 This function exists to allow extensions to easily wrap capabilities
589 This function exists to allow extensions to easily wrap capabilities
589 computation
590 computation
590
591
591 - returns a list: easy to alter
592 - returns a list: easy to alter
592 - changes done here will be propagated to both the `capabilities` and `hello`
593 - changes done here will be propagated to both the `capabilities` and `hello`
593 commands without any other action needed.
594 commands without any other action needed.
594 """
595 """
595 # copy to prevent modification of the global list
596 # copy to prevent modification of the global list
596 caps = list(wireprotocaps)
597 caps = list(wireprotocaps)
597 if _allowstream(repo.ui):
598 if _allowstream(repo.ui):
598 if repo.ui.configbool('server', 'preferuncompressed', False):
599 if repo.ui.configbool('server', 'preferuncompressed', False):
599 caps.append('stream-preferred')
600 caps.append('stream-preferred')
600 requiredformats = repo.requirements & repo.supportedformats
601 requiredformats = repo.requirements & repo.supportedformats
601 # if our local revlogs are just revlogv1, add 'stream' cap
602 # if our local revlogs are just revlogv1, add 'stream' cap
602 if not requiredformats - set(('revlogv1',)):
603 if not requiredformats - set(('revlogv1',)):
603 caps.append('stream')
604 caps.append('stream')
604 # otherwise, add 'streamreqs' detailing our local revlog format
605 # otherwise, add 'streamreqs' detailing our local revlog format
605 else:
606 else:
606 caps.append('streamreqs=%s' % ','.join(requiredformats))
607 caps.append('streamreqs=%s' % ','.join(requiredformats))
607 if repo.ui.configbool('experimental', 'bundle2-exp', False):
608 if repo.ui.configbool('experimental', 'bundle2-exp', False):
608 capsblob = bundle2.encodecaps(repo.bundle2caps)
609 capsblob = bundle2.encodecaps(repo.bundle2caps)
609 caps.append('bundle2-exp=' + urllib.quote(capsblob))
610 caps.append('bundle2-exp=' + urllib.quote(capsblob))
610 caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
611 caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
611 caps.append('httpheader=1024')
612 caps.append('httpheader=1024')
612 return caps
613 return caps
613
614
614 # If you are writing an extension and consider wrapping this function, wrap
615 # If you are writing an extension and consider wrapping this function, wrap
615 # `_capabilities` instead.
616 # `_capabilities` instead.
616 @wireprotocommand('capabilities')
617 @wireprotocommand('capabilities')
617 def capabilities(repo, proto):
618 def capabilities(repo, proto):
618 return ' '.join(_capabilities(repo, proto))
619 return ' '.join(_capabilities(repo, proto))
619
620
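Following the advice in the comment above, a sketch of how an extension might advertise an extra capability by wrapping `_capabilities`; the capability name 'mycap' is made up:

    from mercurial import extensions, wireproto

    def capswrapper(orig, repo, proto):
        caps = orig(repo, proto)  # the list built by _capabilities
        caps.append('mycap')      # advertise an extra token
        return caps

    def uisetup(ui):
        extensions.wrapfunction(wireproto, '_capabilities', capswrapper)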
620 @wireprotocommand('changegroup', 'roots')
621 @wireprotocommand('changegroup', 'roots')
621 def changegroup(repo, proto, roots):
622 def changegroup(repo, proto, roots):
622 nodes = decodelist(roots)
623 nodes = decodelist(roots)
623 cg = changegroupmod.changegroup(repo, nodes, 'serve')
624 cg = changegroupmod.changegroup(repo, nodes, 'serve')
624 return streamres(proto.groupchunks(cg))
625 return streamres(proto.groupchunks(cg))
625
626
626 @wireprotocommand('changegroupsubset', 'bases heads')
627 @wireprotocommand('changegroupsubset', 'bases heads')
627 def changegroupsubset(repo, proto, bases, heads):
628 def changegroupsubset(repo, proto, bases, heads):
628 bases = decodelist(bases)
629 bases = decodelist(bases)
629 heads = decodelist(heads)
630 heads = decodelist(heads)
630 cg = changegroupmod.changegroupsubset(repo, bases, heads, 'serve')
631 cg = changegroupmod.changegroupsubset(repo, bases, heads, 'serve')
631 return streamres(proto.groupchunks(cg))
632 return streamres(proto.groupchunks(cg))
632
633
633 @wireprotocommand('debugwireargs', 'one two *')
634 @wireprotocommand('debugwireargs', 'one two *')
634 def debugwireargs(repo, proto, one, two, others):
635 def debugwireargs(repo, proto, one, two, others):
635 # only accept optional args from the known set
636 # only accept optional args from the known set
636 opts = options('debugwireargs', ['three', 'four'], others)
637 opts = options('debugwireargs', ['three', 'four'], others)
637 return repo.debugwireargs(one, two, **opts)
638 return repo.debugwireargs(one, two, **opts)
638
639
639 # List of options accepted by getbundle.
640 # List of options accepted by getbundle.
640 #
641 #
641 # Meant to be extended by extensions. It is the extension's responsibility to
642 # Meant to be extended by extensions. It is the extension's responsibility to
642 # ensure such options are properly processed in exchange.getbundle.
643 # ensure such options are properly processed in exchange.getbundle.
643 gboptslist = ['heads', 'common', 'bundlecaps']
644 gboptslist = ['heads', 'common', 'bundlecaps']
644
645
645 @wireprotocommand('getbundle', '*')
646 @wireprotocommand('getbundle', '*')
646 def getbundle(repo, proto, others):
647 def getbundle(repo, proto, others):
647 opts = options('getbundle', gboptsmap.keys(), others)
648 opts = options('getbundle', gboptsmap.keys(), others)
648 for k, v in opts.iteritems():
649 for k, v in opts.iteritems():
649 keytype = gboptsmap[k]
650 keytype = gboptsmap[k]
650 if keytype == 'nodes':
651 if keytype == 'nodes':
651 opts[k] = decodelist(v)
652 opts[k] = decodelist(v)
652 elif keytype == 'csv':
653 elif keytype == 'csv':
653 opts[k] = set(v.split(','))
654 opts[k] = set(v.split(','))
654 elif keytype != 'plain':
655 elif keytype != 'plain':
655 raise KeyError('unknown getbundle option type %s'
656 raise KeyError('unknown getbundle option type %s'
656 % keytype)
657 % keytype)
657 cg = exchange.getbundle(repo, 'serve', **opts)
658 cg = exchange.getbundle(repo, 'serve', **opts)
658 return streamres(proto.groupchunks(cg))
659 return streamres(proto.groupchunks(cg))
659
660
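For instance, a request carrying the new argument would be decoded roughly like this (the values are illustrative):

    others = {'listkeys': 'phases,bookmarks'}  # raw wire strings
    opts = options('getbundle', gboptsmap.keys(), others)
    # the 'csv' branch above turns it into
    #   opts['listkeys'] == set(['phases', 'bookmarks'])
    # before exchange.getbundle(repo, 'serve', **opts) is invoked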
660 @wireprotocommand('heads')
661 @wireprotocommand('heads')
661 def heads(repo, proto):
662 def heads(repo, proto):
662 h = repo.heads()
663 h = repo.heads()
663 return encodelist(h) + "\n"
664 return encodelist(h) + "\n"
664
665
665 @wireprotocommand('hello')
666 @wireprotocommand('hello')
666 def hello(repo, proto):
667 def hello(repo, proto):
667 '''the hello command returns a set of lines describing various
668 '''the hello command returns a set of lines describing various
668 interesting things about the server, in an RFC822-like format.
669 interesting things about the server, in an RFC822-like format.
669 Currently the only one defined is "capabilities", which
670 Currently the only one defined is "capabilities", which
670 consists of a line in the form:
671 consists of a line in the form:
671
672
672 capabilities: space separated list of tokens
673 capabilities: space separated list of tokens
673 '''
674 '''
674 return "capabilities: %s\n" % (capabilities(repo, proto))
675 return "capabilities: %s\n" % (capabilities(repo, proto))
675
676
676 @wireprotocommand('listkeys', 'namespace')
677 @wireprotocommand('listkeys', 'namespace')
677 def listkeys(repo, proto, namespace):
678 def listkeys(repo, proto, namespace):
678 d = repo.listkeys(encoding.tolocal(namespace)).items()
679 d = repo.listkeys(encoding.tolocal(namespace)).items()
679 return pushkeymod.encodekeys(d)
680 return pushkeymod.encodekeys(d)
680
681
681 @wireprotocommand('lookup', 'key')
682 @wireprotocommand('lookup', 'key')
682 def lookup(repo, proto, key):
683 def lookup(repo, proto, key):
683 try:
684 try:
684 k = encoding.tolocal(key)
685 k = encoding.tolocal(key)
685 c = repo[k]
686 c = repo[k]
686 r = c.hex()
687 r = c.hex()
687 success = 1
688 success = 1
688 except Exception, inst:
689 except Exception, inst:
689 r = str(inst)
690 r = str(inst)
690 success = 0
691 success = 0
691 return "%s %s\n" % (success, r)
692 return "%s %s\n" % (success, r)
692
693
693 @wireprotocommand('known', 'nodes *')
694 @wireprotocommand('known', 'nodes *')
694 def known(repo, proto, nodes, others):
695 def known(repo, proto, nodes, others):
695 return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
696 return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
696
697
697 @wireprotocommand('pushkey', 'namespace key old new')
698 @wireprotocommand('pushkey', 'namespace key old new')
698 def pushkey(repo, proto, namespace, key, old, new):
699 def pushkey(repo, proto, namespace, key, old, new):
699 # compatibility with pre-1.8 clients which were accidentally
700 # compatibility with pre-1.8 clients which were accidentally
700 # sending raw binary nodes rather than utf-8-encoded hex
701 # sending raw binary nodes rather than utf-8-encoded hex
701 if len(new) == 20 and new.encode('string-escape') != new:
702 if len(new) == 20 and new.encode('string-escape') != new:
702 # looks like it could be a binary node
703 # looks like it could be a binary node
703 try:
704 try:
704 new.decode('utf-8')
705 new.decode('utf-8')
705 new = encoding.tolocal(new) # but cleanly decodes as UTF-8
706 new = encoding.tolocal(new) # but cleanly decodes as UTF-8
706 except UnicodeDecodeError:
707 except UnicodeDecodeError:
707 pass # binary, leave unmodified
708 pass # binary, leave unmodified
708 else:
709 else:
709 new = encoding.tolocal(new) # normal path
710 new = encoding.tolocal(new) # normal path
710
711
711 if util.safehasattr(proto, 'restore'):
712 if util.safehasattr(proto, 'restore'):
712
713
713 proto.redirect()
714 proto.redirect()
714
715
715 try:
716 try:
716 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
717 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
717 encoding.tolocal(old), new) or False
718 encoding.tolocal(old), new) or False
718 except util.Abort:
719 except util.Abort:
719 r = False
720 r = False
720
721
721 output = proto.restore()
722 output = proto.restore()
722
723
723 return '%s\n%s' % (int(r), output)
724 return '%s\n%s' % (int(r), output)
724
725
725 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
726 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
726 encoding.tolocal(old), new)
727 encoding.tolocal(old), new)
727 return '%s\n' % int(r)
728 return '%s\n' % int(r)
728
729
729 def _allowstream(ui):
730 def _allowstream(ui):
730 return ui.configbool('server', 'uncompressed', True, untrusted=True)
731 return ui.configbool('server', 'uncompressed', True, untrusted=True)
731
732
732 def _walkstreamfiles(repo):
733 def _walkstreamfiles(repo):
733 # this is its own function so extensions can override it
734 # this is its own function so extensions can override it
734 return repo.store.walk()
735 return repo.store.walk()
735
736
736 @wireprotocommand('stream_out')
737 @wireprotocommand('stream_out')
737 def stream(repo, proto):
738 def stream(repo, proto):
738 '''If the server supports streaming clone, it advertises the "stream"
739 '''If the server supports streaming clone, it advertises the "stream"
739 capability with a value representing the version and flags of the repo
740 capability with a value representing the version and flags of the repo
740 it is serving. Client checks to see if it understands the format.
741 it is serving. Client checks to see if it understands the format.
741
742
742 The format is simple: the server writes out a line with the number
743 The format is simple: the server writes out a line with the number
743 of files, then the total number of bytes to be transferred (separated
744 of files, then the total number of bytes to be transferred (separated
744 by a space). Then, for each file, the server first writes the filename
745 by a space). Then, for each file, the server first writes the filename
745 and file size (separated by the null character), then the file contents.
746 and file size (separated by the null character), then the file contents.
746 '''
747 '''
747
748
748 if not _allowstream(repo.ui):
749 if not _allowstream(repo.ui):
749 return '1\n'
750 return '1\n'
750
751
751 entries = []
752 entries = []
752 total_bytes = 0
753 total_bytes = 0
753 try:
754 try:
754 # get consistent snapshot of repo, lock during scan
755 # get consistent snapshot of repo, lock during scan
755 lock = repo.lock()
756 lock = repo.lock()
756 try:
757 try:
757 repo.ui.debug('scanning\n')
758 repo.ui.debug('scanning\n')
758 for name, ename, size in _walkstreamfiles(repo):
759 for name, ename, size in _walkstreamfiles(repo):
759 if size:
760 if size:
760 entries.append((name, size))
761 entries.append((name, size))
761 total_bytes += size
762 total_bytes += size
762 finally:
763 finally:
763 lock.release()
764 lock.release()
764 except error.LockError:
765 except error.LockError:
765 return '2\n' # error: 2
766 return '2\n' # error: 2
766
767
767 def streamer(repo, entries, total):
768 def streamer(repo, entries, total):
768 '''stream out all metadata files in repository.'''
769 '''stream out all metadata files in repository.'''
769 yield '0\n' # success
770 yield '0\n' # success
770 repo.ui.debug('%d files, %d bytes to transfer\n' %
771 repo.ui.debug('%d files, %d bytes to transfer\n' %
771 (len(entries), total_bytes))
772 (len(entries), total_bytes))
772 yield '%d %d\n' % (len(entries), total_bytes)
773 yield '%d %d\n' % (len(entries), total_bytes)
773
774
774 sopener = repo.sopener
775 sopener = repo.sopener
775 oldaudit = sopener.mustaudit
776 oldaudit = sopener.mustaudit
776 debugflag = repo.ui.debugflag
777 debugflag = repo.ui.debugflag
777 sopener.mustaudit = False
778 sopener.mustaudit = False
778
779
779 try:
780 try:
780 for name, size in entries:
781 for name, size in entries:
781 if debugflag:
782 if debugflag:
782 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
783 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
783 # partially encode name over the wire for backwards compat
784 # partially encode name over the wire for backwards compat
784 yield '%s\0%d\n' % (store.encodedir(name), size)
785 yield '%s\0%d\n' % (store.encodedir(name), size)
785 if size <= 65536:
786 if size <= 65536:
786 fp = sopener(name)
787 fp = sopener(name)
787 try:
788 try:
788 data = fp.read(size)
789 data = fp.read(size)
789 finally:
790 finally:
790 fp.close()
791 fp.close()
791 yield data
792 yield data
792 else:
793 else:
793 for chunk in util.filechunkiter(sopener(name), limit=size):
794 for chunk in util.filechunkiter(sopener(name), limit=size):
794 yield chunk
795 yield chunk
795 # replace with "finally:" when support for python 2.4 has been dropped
796 # replace with "finally:" when support for python 2.4 has been dropped
796 except Exception:
797 except Exception:
797 sopener.mustaudit = oldaudit
798 sopener.mustaudit = oldaudit
798 raise
799 raise
799 sopener.mustaudit = oldaudit
800 sopener.mustaudit = oldaudit
800
801
801 return streamres(streamer(repo, entries, total_bytes))
802 return streamres(streamer(repo, entries, total_bytes))
802
803
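A minimal client-side sketch of consuming the stream_out format described in the docstring above; the function name and error handling are assumptions:

    def readstreamclone(fp):
        status = fp.readline().strip()
        if status != '0':                    # '1' = refused, '2' = lock error
            raise RuntimeError('stream_out failed: %s' % status)
        filecount, bytecount = map(int, fp.readline().split())
        for _ in xrange(filecount):
            header = fp.readline().rstrip('\n')
            name, size = header.split('\0')  # filename NUL size
            yield name, fp.read(int(size))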
803 @wireprotocommand('unbundle', 'heads')
804 @wireprotocommand('unbundle', 'heads')
804 def unbundle(repo, proto, heads):
805 def unbundle(repo, proto, heads):
805 their_heads = decodelist(heads)
806 their_heads = decodelist(heads)
806
807
807 try:
808 try:
808 proto.redirect()
809 proto.redirect()
809
810
810 exchange.check_heads(repo, their_heads, 'preparing changes')
811 exchange.check_heads(repo, their_heads, 'preparing changes')
811
812
812 # write bundle data to temporary file because it can be big
813 # write bundle data to temporary file because it can be big
813 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
814 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
814 fp = os.fdopen(fd, 'wb+')
815 fp = os.fdopen(fd, 'wb+')
815 r = 0
816 r = 0
816 try:
817 try:
817 proto.getfile(fp)
818 proto.getfile(fp)
818 fp.seek(0)
819 fp.seek(0)
819 gen = exchange.readbundle(repo.ui, fp, None)
820 gen = exchange.readbundle(repo.ui, fp, None)
820 r = exchange.unbundle(repo, gen, their_heads, 'serve',
821 r = exchange.unbundle(repo, gen, their_heads, 'serve',
821 proto._client())
822 proto._client())
822 if util.safehasattr(r, 'addpart'):
823 if util.safehasattr(r, 'addpart'):
823 # The return looks streamable, we are in the bundle2 case and
824 # The return looks streamable, we are in the bundle2 case and
824 # should return a stream.
825 # should return a stream.
825 return streamres(r.getchunks())
826 return streamres(r.getchunks())
826 return pushres(r)
827 return pushres(r)
827
828
828 finally:
829 finally:
829 fp.close()
830 fp.close()
830 os.unlink(tempname)
831 os.unlink(tempname)
831 except error.BundleValueError, exc:
832 except error.BundleValueError, exc:
832 bundler = bundle2.bundle20(repo.ui)
833 bundler = bundle2.bundle20(repo.ui)
833 errpart = bundler.newpart('B2X:ERROR:UNSUPPORTEDCONTENT')
834 errpart = bundler.newpart('B2X:ERROR:UNSUPPORTEDCONTENT')
834 if exc.parttype is not None:
835 if exc.parttype is not None:
835 errpart.addparam('parttype', exc.parttype)
836 errpart.addparam('parttype', exc.parttype)
836 if exc.params:
837 if exc.params:
837 errpart.addparam('params', '\0'.join(exc.params))
838 errpart.addparam('params', '\0'.join(exc.params))
838 return streamres(bundler.getchunks())
839 return streamres(bundler.getchunks())
839 except util.Abort, inst:
840 except util.Abort, inst:
840 # The old code we moved used sys.stderr directly.
841 # The old code we moved used sys.stderr directly.
841 # We did not change it to minimise code change.
842 # We did not change it to minimise code change.
842 # This needs to be moved to something proper.
843 # This needs to be moved to something proper.
843 # Feel free to do it.
844 # Feel free to do it.
844 if getattr(inst, 'duringunbundle2', False):
845 if getattr(inst, 'duringunbundle2', False):
845 bundler = bundle2.bundle20(repo.ui)
846 bundler = bundle2.bundle20(repo.ui)
846 manargs = [('message', str(inst))]
847 manargs = [('message', str(inst))]
847 advargs = []
848 advargs = []
848 if inst.hint is not None:
849 if inst.hint is not None:
849 advargs.append(('hint', inst.hint))
850 advargs.append(('hint', inst.hint))
850 bundler.addpart(bundle2.bundlepart('B2X:ERROR:ABORT',
851 bundler.addpart(bundle2.bundlepart('B2X:ERROR:ABORT',
851 manargs, advargs))
852 manargs, advargs))
852 return streamres(bundler.getchunks())
853 return streamres(bundler.getchunks())
853 else:
854 else:
854 sys.stderr.write("abort: %s\n" % inst)
855 sys.stderr.write("abort: %s\n" % inst)
855 return pushres(0)
856 return pushres(0)
856 except error.PushRaced, exc:
857 except error.PushRaced, exc:
857 if getattr(exc, 'duringunbundle2', False):
858 if getattr(exc, 'duringunbundle2', False):
858 bundler = bundle2.bundle20(repo.ui)
859 bundler = bundle2.bundle20(repo.ui)
859 bundler.newpart('B2X:ERROR:PUSHRACED', [('message', str(exc))])
860 bundler.newpart('B2X:ERROR:PUSHRACED', [('message', str(exc))])
860 return streamres(bundler.getchunks())
861 return streamres(bundler.getchunks())
861 else:
862 else:
862 return pusherr(str(exc))
863 return pusherr(str(exc))