getbundle: pass arbitrary arguments all along the call chain...
Pierre-Yves David
r21157:60ad2ea5 default
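
The change itself is small: exchange.getbundle and the localpeer.getbundle wrapper each grow a catch-all **kwargs parameter, so a request argument introduced at one end of the call chain reaches the other end without every intermediate signature being edited again. Below is a minimal sketch of that pattern, using the commit's function names but not the Mercurial source itself; the 'obsmarkers' argument is a hypothetical example, not part of this commit.

# Minimal sketch of the pattern this commit introduces, not the actual
# Mercurial code. Each layer accepts **kwargs and forwards it verbatim,
# so an argument added by a caller (here the hypothetical 'obsmarkers')
# arrives at the bottom of the chain untouched.

def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    # bottom of the chain: arbitrary extra arguments land here
    return 'bundle for %s with extras %r' % (source, sorted(kwargs))

class localpeer(object):
    def __init__(self, repo):
        self._repo = repo

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        # middle of the chain: pass everything along unchanged
        return getbundle(self._repo, source, heads=heads, common=common,
                         bundlecaps=bundlecaps, **kwargs)

peer = localpeer('repo')
# 'obsmarkers' is a made-up future argument; no signature above names it,
# yet it flows through to the innermost function.
print(peer.getbundle('pull', obsmarkers=True))
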
--- a/mercurial/exchange.py
+++ b/mercurial/exchange.py
@@ -1,744 +1,745 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
from node import hex, nullid
import errno, urllib
import util, scmutil, changegroup, base85
import discovery, phases, obsolete, bookmarks, bundle2

def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, alg)
    elif version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))


class pushoperation(object):
    """A object that represent a single push operation

    It purpose is to carry push related state and very common operation.

    A new should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # set of all heads common after changeset bundle push
        self.commonheads = None

def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _pushcheckoutgoing(pushop):
                pushop.repo.prepushoutgoinghooks(pushop.repo,
                                                 pushop.remote,
                                                 pushop.outgoing)
                if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                              False)
                    and pushop.remote.capable('bundle2-exp')):
                    _pushbundle2(pushop)
                else:
                    _pushchangeset(pushop)
            _pushcomputecommonheads(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    _pushbookmark(pushop)
    return pushop.ret

def _pushdiscovery(pushop):
    # discovery
    unfi = pushop.repo.unfiltered()
    fci = discovery.findcommonincoming
    commoninc = fci(unfi, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True

def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    # Send known head to the server for race detection.
    capsblob = urllib.unquote(pushop.remote.capable('bundle2-exp'))
    caps = bundle2.decodecaps(capsblob)
    bundler = bundle2.bundle20(pushop.ui, caps)
    # create reply capability
    capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
    bundler.addpart(bundle2.bundlepart('b2x:replycaps', data=capsblob))
    if not pushop.force:
        part = bundle2.bundlepart('B2X:CHECK:HEADS',
                                  data=iter(pushop.remoteheads))
        bundler.addpart(part)
    extrainfo = _pushbundle2extraparts(pushop, bundler)
    # add the changegroup bundle
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundle2.bundlepart('B2X:CHANGEGROUP', data=cg.getchunks())
    bundler.addpart(cgpart)
    stream = util.chunkbuffer(bundler.getchunks())
    reply = pushop.remote.unbundle(stream, ['force'], 'push')
    try:
        op = bundle2.processbundle(pushop.repo, reply)
    except KeyError, exc:
        raise util.Abort('missing support for %s' % exc)
    cgreplies = op.records.getreplies(cgpart.id)
    assert len(cgreplies['changegroup']) == 1
    pushop.ret = cgreplies['changegroup'][0]['return']
    _pushbundle2extrareply(pushop, op, extrainfo)

def _pushbundle2extraparts(pushop, bundler):
    """hook function to let extensions add parts

    Return a dict to let extensions pass data to the reply processing.
    """
    return {}

def _pushbundle2extrareply(pushop, op, extrainfo):
    """hook function to let extensions react to part replies

    The dict from _pushbundle2extrareply is fed to this function.
    """
    pass

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            'push')
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())

def _pushcomputecommonheads(pushop):
    unfi = pushop.repo.unfiltered()
    if pushop.ret:
        # push succeed, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # All out push fails. synchronize all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    unfi = pushop.repo.unfiltered()
    cheads = pushop.commonheads
    if pushop.ret:
        # push succeed, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # All out push fails. synchronize all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # Get the list of all revs draft on remote by public here.
        # XXX Beware that revset break if droots is not strictly
        # XXX root we may want to ensure it is but it is costly
        outdated = unfi.set('heads((%ln::%ln) and public())',
                            droots, cheads)
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    if (obsolete._enabled and repo.obsstore and
        'obsolete' in remote.listkeys('namespaces')):
        rslts = []
        remotedata = repo.listkeys('obsolete')
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
     ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                           srchex=hex)

    for b, scid, dcid in advsrc:
        if ancestors and repo[scid].rev() not in ancestors:
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)

class pulloperation(object):
    """A object that represent a single pull operation

    It purpose is to carry push related state and very common operation.

    A new should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step remaining todo (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

def pull(repo, remote, heads=None, force=False):
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if (pullop.repo.ui.configbool('server', 'bundle2', False)
            and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult

def _pulldiscovery(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point."""
    tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    pullop.common, pullop.fetch, pullop.rheads = tmp

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': set(['HG2X'])}
    capsblob = bundle2.encodecaps(pullop.repo.bundle2caps)
    kwargs['bundlecaps'].add('bundle2=' + urllib.quote(capsblob))
    # pulling changegroup
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        kwargs['common'] = pullop.common
        kwargs['heads'] = pullop.heads or pullop.rheads
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except KeyError, exc:
        raise util.Abort('missing support for %s' % exc)
    assert len(op.records['changegroup']) == 1
    pullop.cgresult = op.records['changegroup'][0]['return']

def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())

def _pullphase(pullop):
    # Get remote phases data from remote
    pullop.todosteps.remove('phases')
    remotephases = pullop.remote.listkeys('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        phases.advanceboundary(pullop.repo, phases.public, pheads)
        phases.advanceboundary(pullop.repo, phases.draft,
                               pullop.pulledsubset)
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        phases.advanceboundary(pullop.repo, phases.public,
                               pullop.pulledsubset)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr

-def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
+def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
+              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    changes when more part type will be available for bundle2.

    This is different from changegroup.getbundle that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we what to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # build bundle here.
    cg = changegroup.getbundle(repo, source, heads=heads,
                               common=common, bundlecaps=bundlecaps)
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    part = bundle2.bundlepart('b2x:changegroup', data=cg.getchunks())
    bundler.addpart(part)
    return util.chunkbuffer(bundler.getchunks())

class PushRaced(RuntimeError):
    """An exception raised during unbundling that indicate a push race"""

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise PushRaced('repository changed while %s - '
                        'please try again' % context)

def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            tr = repo.transaction('unbundle')
            tr.hookargs['bundle2-exp'] = '1'
            r = bundle2.processbundle(repo, cg, lambda: tr).reply
            cl = repo.unfiltered().changelog
            p = cl.writepending() and repo.root or ""
            repo.hook('b2x-pretransactionclose', throw=True, source=source,
                      url=url, pending=p, **tr.hookargs)
            tr.close()
            repo.hook('b2x-transactionclose', source=source, url=url,
                      **tr.hookargs)
        else:
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
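
Note that _pullbundle2 above already assembles its request as a plain dict and applies it with remote.getbundle('pull', **kwargs); the catch-all parameter added by this commit is what lets keys beyond heads, common and bundlecaps survive that call. A toy illustration, again with a hypothetical 'obsmarkers' key that is not part of this commit:

# Toy illustration of the dict-splat calling style used by _pullbundle2,
# not Mercurial code. 'obsmarkers' is a hypothetical future request key.
def getbundle(source, heads=None, common=None, bundlecaps=None, **kwargs):
    # before this commit an unknown key would have raised a TypeError;
    # with **kwargs it is simply collected for later use
    return sorted(kwargs)

kwargs = {'bundlecaps': set(['HG2X'])}
kwargs['common'] = ['<node>']
kwargs['heads'] = ['<node>']
kwargs['obsmarkers'] = True

print(getbundle('pull', **kwargs))   # prints ['obsmarkers']
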
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,1910 +1,1910 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
-                  format='HG10'):
+                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
-                                common=common, bundlecaps=bundlecaps)
+                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2X' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except exchange.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), exc.message)

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    bundle2caps = {'HG2X': ()}

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(self.bundle2caps)
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

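    # [editor's note] Illustrative, not part of the original file: the
    # 'bundle2-exp' switch tested above is an hgrc knob, e.g.
    #
    #   [experimental]
    #   bundle2-exp = True
    #
    # With it set, the peer advertises a 'bundle2-exp=<caps>' capability
    # built from bundle2caps; without it, bundle2 stays hidden.
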
    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

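    # [editor's note] Illustrative, not part of the original file: filter
    # names are the repoview filters; localpeer above works on
    # repo.filtered('served'), and cancopy() below checks
    # repo.filtered('visible'). For example:
    #
    #   served = repo.filtered('served')   # what we serve to peers
    #   unfi = repo.unfiltered()           # bypasses all filtering
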
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

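    # [editor's note] Illustrative, not part of the original file:
    # revset.formatspec() quotes each extra argument, so values can be
    # spliced into an expression safely, e.g.
    #
    #   for rev in repo.revs('branch(%s)', 'default'):
    #       ...
    #   for ctx in repo.set('%ln::', nodes):    # %ln: a list of nodes
    #       ...
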
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records; the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them only
            # for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

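    # [editor's note] Illustrative, not part of the original file: the
    # filter patterns above come from the [encode] and [decode] hgrc
    # sections, e.g.
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #   [decode]
    #   *.gz = gzip
    #
    # "pipe:" (the default) runs the command as a shell filter; prefixes
    # registered through adddatafilter() below select a Python filter
    # function instead.
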
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        def onclose():
            self.store.write(tr)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        self._transref = weakref.ref(tr)
        return tr

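    # [editor's note] Illustrative, not part of the original file: the
    # usual calling pattern pairs transaction() with release() in a
    # finally block, as in the exchange.py hunk at the top of this page:
    #
    #   tr = repo.transaction('some-operation')
    #   try:
    #       ...                # write to the repository
    #       tr.close()         # commit the transaction
    #   finally:
    #       tr.release()       # rolls back if close() never ran
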
874 def _journalfiles(self):
874 def _journalfiles(self):
875 return ((self.svfs, 'journal'),
875 return ((self.svfs, 'journal'),
876 (self.vfs, 'journal.dirstate'),
876 (self.vfs, 'journal.dirstate'),
877 (self.vfs, 'journal.branch'),
877 (self.vfs, 'journal.branch'),
878 (self.vfs, 'journal.desc'),
878 (self.vfs, 'journal.desc'),
879 (self.vfs, 'journal.bookmarks'),
879 (self.vfs, 'journal.bookmarks'),
880 (self.svfs, 'journal.phaseroots'))
880 (self.svfs, 'journal.phaseroots'))
881
881
882 def undofiles(self):
882 def undofiles(self):
883 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
883 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
884
884
885 def _writejournal(self, desc):
885 def _writejournal(self, desc):
886 self.opener.write("journal.dirstate",
886 self.opener.write("journal.dirstate",
887 self.opener.tryread("dirstate"))
887 self.opener.tryread("dirstate"))
888 self.opener.write("journal.branch",
888 self.opener.write("journal.branch",
889 encoding.fromlocal(self.dirstate.branch()))
889 encoding.fromlocal(self.dirstate.branch()))
890 self.opener.write("journal.desc",
890 self.opener.write("journal.desc",
891 "%d\n%s\n" % (len(self), desc))
891 "%d\n%s\n" % (len(self), desc))
892 self.opener.write("journal.bookmarks",
892 self.opener.write("journal.bookmarks",
893 self.opener.tryread("bookmarks"))
893 self.opener.tryread("bookmarks"))
894 self.sopener.write("journal.phaseroots",
894 self.sopener.write("journal.phaseroots",
895 self.sopener.tryread("phaseroots"))
895 self.sopener.tryread("phaseroots"))
896
896
897 def recover(self):
897 def recover(self):
898 lock = self.lock()
898 lock = self.lock()
899 try:
899 try:
900 if self.svfs.exists("journal"):
900 if self.svfs.exists("journal"):
901 self.ui.status(_("rolling back interrupted transaction\n"))
901 self.ui.status(_("rolling back interrupted transaction\n"))
902 transaction.rollback(self.sopener, "journal",
902 transaction.rollback(self.sopener, "journal",
903 self.ui.warn)
903 self.ui.warn)
904 self.invalidate()
904 self.invalidate()
905 return True
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback,
        # pass them to destroy(), which will prevent the branchhead cache
        # from being invalidated.
        self.destroyed()
        return 0
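
    # Usage sketch (illustrative, not part of the original module): a caller
    # holding a localrepository instance 'repo' can preview a rollback
    # before performing it.
    #
    #     if repo.rollback(dryrun=True) == 0:   # prints what would be undone
    #         repo.rollback(force=False)        # performs the rollback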

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly reread the dirstate (i.e. restore it to a previously
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to a 600 second timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """Add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()
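
    # Usage sketch (illustrative): defer work until the store lock is
    # released, falling back to running it immediately when no lock is
    # held -- the same pattern commit() uses for its 'commit' hook below.
    #
    #     def notify():
    #         repo.ui.status('lock released\n')
    #     repo._afterlock(notify)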

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
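
    # Usage sketch (illustrative): always pair the lock with a release in a
    # finally block, as rollback() and commitctx() do in this module.
    #
    #     lock = repo.lock()
    #     try:
    #         pass  # modify .hg/store here
    #     finally:
    #         lock.release()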

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
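
    # Note on lock ordering (inferred from this module's own callers, such
    # as rollback() and commit()): when both locks are needed, the
    # working-directory lock is taken before the store lock.
    #
    #     wlock = repo.wlock()
    #     lock = repo.lock()
    #     try:
    #         pass  # modify both .hg and .hg/store here
    #     finally:
    #         release(lock, wlock)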

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
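
    # For reference (derived from the code above): when a rename is
    # detected, the filelog revision carries copy metadata of the form
    #
    #     meta = {'copy': 'foo', 'copyrev': '<40-digit hex filenode>'}
    #
    # and its first parent is set to nullid so readers know to consult
    # that metadata instead.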

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in changes[:3]:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
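
    # Usage sketch (illustrative; argument values are made up): commit the
    # working directory, then look up the resulting changeset.
    #
    #     node = repo.commit(text='fix frobnication', user='alice')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
    #     else:
    #         ctx = repo[node]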

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase is 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
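
    # Hook ordering note (summarizing the code above and in commit()):
    # 'precommit' fires before commitctx(), 'pretxncommit' fires inside the
    # open transaction (and may veto it), and the 'commit' hook is deferred
    # via _afterlock() until the store lock is released.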

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes, or between a node and
        the working directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with the working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
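
    # Usage sketch (illustrative): the return value is a 7-tuple of sorted
    # file lists in this fixed order.
    #
    #     (modified, added, removed, deleted,
    #      unknown, ignored, clean) = repo.status(unknown=True, clean=True)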

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
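
    # Usage sketch (illustrative; the branch name is made up):
    #
    #     for node in repo.branchheads('default', closed=True):
    #         repo.ui.status('%s\n' % hex(node))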

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
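
    # Note (derived from the loop above): for each (top, bottom) pair the
    # result samples first parents at exponentially growing distances from
    # top -- 1, 2, 4, 8, ... -- so a linear history of length N yields only
    # about log2(N) nodes.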

    def pull(self, remote, heads=None, force=False):
        return exchange.pull(self, remote, heads, force)

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def push(self, remote, force=False, revs=None, newbranch=False):
        return exchange.push(self, remote, force, revs, newbranch)
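
    # Usage sketch (illustrative; 'myext' and the function are made up):
    # an extension can register a pre-push check without overriding push().
    #
    #     def checkoutgoing(repo, remote, outgoing):
    #         pass  # raise util.Abort(...) to veto the push
    #     repo.prepushoutgoinghooks.add('myext', checkoutgoing)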

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save the remote branchmap. We will use it later
            # to speed up branchcache creation.
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible: filters above
                    # 'served' are unlikely to be fetched from a clone.
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
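
    # Wire format summary (as parsed above): the stream begins with a status
    # line (0 = OK, 1 = forbidden, 2 = remote lock failed), then a line with
    # "<total_files> <total_bytes>", then for each file a header line of
    # "<name>\0<size>" followed by exactly <size> bytes of revlog data.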

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
1861
1861
1862 def pushkey(self, namespace, key, old, new):
1863 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1864 old=old, new=new)
1865 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1866 ret = pushkey.push(self, namespace, key, old, new)
1867 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1868 ret=ret)
1869 return ret
1870
1871 def listkeys(self, namespace):
1872 self.hook('prelistkeys', throw=True, namespace=namespace)
1873 self.ui.debug('listing keys for "%s"\n' % namespace)
1874 values = pushkey.list(self, namespace)
1875 self.hook('listkeys', namespace=namespace, values=values)
1876 return values
1877
1878 def debugwireargs(self, one, two, three=None, four=None, five=None):
1879 '''used to test argument passing over the wire'''
1880 return "%s %s %s %s %s" % (one, two, three, four, five)
1881
1882 def savecommitmessage(self, text):
1883 fp = self.opener('last-message.txt', 'wb')
1884 try:
1885 fp.write(text)
1886 finally:
1887 fp.close()
1888 return self.pathto(fp.name[len(self.root) + 1:])
1889
1890 # used to avoid circular references so destructors work
1891 def aftertrans(files):
1892 renamefiles = [tuple(t) for t in files]
1893 def a():
1894 for vfs, src, dest in renamefiles:
1895 try:
1896 vfs.rename(src, dest)
1897 except OSError: # journal file does not yet exist
1898 pass
1899 return a
1900
1901 def undoname(fn):
1902 base, name = os.path.split(fn)
1903 assert name.startswith('journal')
1904 return os.path.join(base, name.replace('journal', 'undo', 1))
1905
1906 def instance(ui, path, create):
1907 return localrepository(ui, util.urllocalpath(path), create)
1908
1909 def islocal(path):
1910 return True
@@ -1,812 +1,814 b''
1 # wireproto.py - generic wire protocol support functions
2 #
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 import urllib, tempfile, os, sys
9 from i18n import _
10 from node import bin, hex
11 import changegroup as changegroupmod, bundle2
12 import peer, error, encoding, util, store, exchange
13
14
15 class abstractserverproto(object):
16 """abstract class that summarizes the protocol API
17
18 Used as reference and documentation.
19 """
20
21 def getargs(self, args):
22 """return the value for arguments in <args>
23
24 returns a list of values (same order as <args>)"""
25 raise NotImplementedError()
26
27 def getfile(self, fp):
28 """write the whole content of a file into a file like object
29
30 The file is in the form::
31
32 (<chunk-size>\n<chunk>)+0\n
33
34 the chunk size is the ASCII representation of the integer.
35 """
36 raise NotImplementedError()
37
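The chunked format above is easy to consume incrementally. A minimal reader sketch, assuming fp supports readline() and read() as usual file-like objects do; readchunkedfile is illustrative only and not part of wireproto.py:

    def readchunkedfile(fp, out):
        # consume "(<chunk-size>\n<chunk>)+0\n" from fp, copying chunks to out
        while True:
            size = int(fp.readline())    # ASCII size header, '\n'-terminated
            if size == 0:                # the final "0\n" ends the file
                break
            out.write(fp.read(size))     # copy exactly one chunk
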
38 def redirect(self):
39 """may setup interception for stdout and stderr
40
41 See also the `restore` method."""
42 raise NotImplementedError()
43
44 # If the `redirect` function does install interception, the `restore`
45 # function MUST be defined. If interception is not used, this function
46 # MUST NOT be defined.
47 #
48 # left commented here on purpose
49 #
50 #def restore(self):
51 # """reinstall previous stdout and stderr and return intercepted stdout
52 # """
53 # raise NotImplementedError()
54
55 def groupchunks(self, cg):
56 """return the changegroup as a stream of 4096-byte chunks
57
58 Some protocols may have compressed the contents."""
59 raise NotImplementedError()
60
61 # abstract batching support
62
63 class future(object):
64 '''placeholder for a value to be set later'''
65 def set(self, value):
66 if util.safehasattr(self, 'value'):
67 raise error.RepoError("future is already set")
68 self.value = value
69
70 class batcher(object):
71 '''base class for batches of commands submittable in a single request
72
73 All methods invoked on instances of this class are simply queued and
74 return a future for the result. Once you call submit(), all the queued
75 calls are performed and the results set in their respective futures.
76 '''
77 def __init__(self):
78 self.calls = []
79 def __getattr__(self, name):
80 def call(*args, **opts):
81 resref = future()
82 self.calls.append((name, args, opts, resref,))
83 return resref
84 return call
85 def submit(self):
86 pass
87
88 class localbatch(batcher):
89 '''performs the queued calls directly'''
90 def __init__(self, local):
91 batcher.__init__(self)
92 self.local = local
93 def submit(self):
94 for name, args, opts, resref in self.calls:
95 resref.set(getattr(self.local, name)(*args, **opts))
96
97 class remotebatch(batcher):
98 '''batches the queued calls; uses as few roundtrips as possible'''
99 def __init__(self, remote):
100 '''remote must support _submitbatch(encbatch) and
101 _submitone(op, encargs)'''
102 batcher.__init__(self)
103 self.remote = remote
104 def submit(self):
105 req, rsp = [], []
106 for name, args, opts, resref in self.calls:
107 mtd = getattr(self.remote, name)
108 batchablefn = getattr(mtd, 'batchable', None)
109 if batchablefn is not None:
110 batchable = batchablefn(mtd.im_self, *args, **opts)
111 encargsorres, encresref = batchable.next()
112 if encresref:
113 req.append((name, encargsorres,))
114 rsp.append((batchable, encresref, resref,))
115 else:
116 resref.set(encargsorres)
117 else:
118 if req:
119 self._submitreq(req, rsp)
120 req, rsp = [], []
121 resref.set(mtd(*args, **opts))
122 if req:
123 self._submitreq(req, rsp)
124 def _submitreq(self, req, rsp):
125 encresults = self.remote._submitbatch(req)
126 for encres, r in zip(encresults, rsp):
127 batchable, encresref, resref = r
128 encresref.set(encres)
129 resref.set(batchable.next())
130
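The concrete client entry point is wirepeer.batch() below, which returns a remotebatch. A minimal usage sketch, assuming peer is a connected wirepeer and nodes a list of binary node ids (illustrative, not part of this file):

    b = peer.batch()            # a remotebatch queueing proxy
    fheads = b.heads()          # queued; returns a future, no I/O yet
    fknown = b.known(nodes)     # queued as well
    b.submit()                  # both commands go out in one 'batch' request
    heads, known = fheads.value, fknown.value
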
131 def batchable(f):
132 '''annotation for batchable methods
133
134 Such methods must implement a coroutine as follows:
135
136 @batchable
137 def sample(self, one, two=None):
138 # Handle locally computable results first:
139 if not one:
140 yield "a local result", None
141 # Build list of encoded arguments suitable for your wire protocol:
142 encargs = [('one', encode(one),), ('two', encode(two),)]
143 # Create future for injection of encoded result:
144 encresref = future()
145 # Return encoded arguments and future:
146 yield encargs, encresref
147 # Assuming the future to be filled with the result from the batched
148 # request now. Decode it:
149 yield decode(encresref.value)
150
151 The decorator returns a function which wraps this coroutine as a plain
152 method, but adds the original method as an attribute called "batchable",
153 which is used by remotebatch to split the call into separate encoding and
154 decoding phases.
155 '''
156 def plain(*args, **opts):
157 batchable = f(*args, **opts)
158 encargsorres, encresref = batchable.next()
159 if not encresref:
160 return encargsorres # a local result in this case
161 self = args[0]
162 encresref.set(self._submitone(f.func_name, encargsorres))
163 return batchable.next()
164 setattr(plain, 'batchable', f)
165 return plain
166
167 # list of nodes encoding / decoding
168
169 def decodelist(l, sep=' '):
170 if l:
171 return map(bin, l.split(sep))
172 return []
173
174 def encodelist(l, sep=' '):
175 return sep.join(map(hex, l))
176
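Nodes travel on the wire as separator-joined 40-digit hex strings, and decodelist is the exact inverse of encodelist. An illustrative round trip (the node values are made up):

    nodes = ['\x11' * 20, '\xff' * 20]    # two fake 20-byte binary nodes
    wire = encodelist(nodes)              # '1111...1111 ffff...ffff'
    assert decodelist(wire) == nodes
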
177 # batched call argument encoding
178
179 def escapearg(plain):
180 return (plain
181 .replace(':', '::')
182 .replace(',', ':,')
183 .replace(';', ':;')
184 .replace('=', ':='))
185
186 def unescapearg(escaped):
187 return (escaped
188 .replace(':=', '=')
189 .replace(':;', ';')
190 .replace(':,', ',')
191 .replace('::', ':'))
192
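The characters ',', ';' and '=' structure a batch request, so values escape them behind ':' (and ':' itself doubles). A quick round-trip example:

    escaped = escapearg('key=a,b;c:d')    # -> 'key:=a:,b:;c::d'
    assert unescapearg(escaped) == 'key=a,b;c:d'
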
193 # client side
194
195 class wirepeer(peer.peerrepository):
196
197 def batch(self):
198 return remotebatch(self)
199 def _submitbatch(self, req):
200 cmds = []
201 for op, argsdict in req:
202 args = ','.join('%s=%s' % p for p in argsdict.iteritems())
203 cmds.append('%s %s' % (op, args))
204 rsp = self._call("batch", cmds=';'.join(cmds))
205 return rsp.split(';')
206 def _submitone(self, op, args):
207 return self._call(op, **args)
208
209 @batchable
210 def lookup(self, key):
211 self.requirecap('lookup', _('look up remote revision'))
212 f = future()
213 yield {'key': encoding.fromlocal(key)}, f
214 d = f.value
215 success, data = d[:-1].split(" ", 1)
216 if int(success):
217 yield bin(data)
218 self._abort(error.RepoError(data))
219
220 @batchable
221 def heads(self):
222 f = future()
223 yield {}, f
224 d = f.value
225 try:
226 yield decodelist(d[:-1])
227 except ValueError:
228 self._abort(error.ResponseError(_("unexpected response:"), d))
229
230 @batchable
231 def known(self, nodes):
232 f = future()
233 yield {'nodes': encodelist(nodes)}, f
234 d = f.value
235 try:
236 yield [bool(int(f)) for f in d]
237 except ValueError:
238 self._abort(error.ResponseError(_("unexpected response:"), d))
239
240 @batchable
241 def branchmap(self):
242 f = future()
243 yield {}, f
244 d = f.value
245 try:
246 branchmap = {}
247 for branchpart in d.splitlines():
248 branchname, branchheads = branchpart.split(' ', 1)
249 branchname = encoding.tolocal(urllib.unquote(branchname))
250 branchheads = decodelist(branchheads)
251 branchmap[branchname] = branchheads
252 yield branchmap
253 except TypeError:
254 self._abort(error.ResponseError(_("unexpected response:"), d))
255
256 def branches(self, nodes):
257 n = encodelist(nodes)
258 d = self._call("branches", nodes=n)
259 try:
260 br = [tuple(decodelist(b)) for b in d.splitlines()]
261 return br
262 except ValueError:
263 self._abort(error.ResponseError(_("unexpected response:"), d))
264
265 def between(self, pairs):
266 batch = 8 # avoid giant requests
267 r = []
268 for i in xrange(0, len(pairs), batch):
269 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
270 d = self._call("between", pairs=n)
271 try:
272 r.extend(l and decodelist(l) or [] for l in d.splitlines())
273 except ValueError:
274 self._abort(error.ResponseError(_("unexpected response:"), d))
275 return r
276
277 @batchable
278 def pushkey(self, namespace, key, old, new):
279 if not self.capable('pushkey'):
280 yield False, None
281 f = future()
282 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
283 yield {'namespace': encoding.fromlocal(namespace),
284 'key': encoding.fromlocal(key),
285 'old': encoding.fromlocal(old),
286 'new': encoding.fromlocal(new)}, f
287 d = f.value
288 d, output = d.split('\n', 1)
289 try:
290 d = bool(int(d))
291 except ValueError:
292 raise error.ResponseError(
293 _('push failed (unexpected response):'), d)
294 for l in output.splitlines(True):
295 self.ui.status(_('remote: '), l)
296 yield d
297
298 @batchable
299 def listkeys(self, namespace):
300 if not self.capable('pushkey'):
301 yield {}, None
302 f = future()
303 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
304 yield {'namespace': encoding.fromlocal(namespace)}, f
305 d = f.value
306 r = {}
307 for l in d.splitlines():
308 k, v = l.split('\t')
309 r[encoding.tolocal(k)] = encoding.tolocal(v)
310 yield r
311
312 def stream_out(self):
313 return self._callstream('stream_out')
314
315 def changegroup(self, nodes, kind):
316 n = encodelist(nodes)
317 f = self._callcompressable("changegroup", roots=n)
318 return changegroupmod.unbundle10(f, 'UN')
319
320 def changegroupsubset(self, bases, heads, kind):
321 self.requirecap('changegroupsubset', _('look up remote changes'))
322 bases = encodelist(bases)
323 heads = encodelist(heads)
324 f = self._callcompressable("changegroupsubset",
325 bases=bases, heads=heads)
326 return changegroupmod.unbundle10(f, 'UN')
327
-328 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
+328 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
+329 **kwargs):
330 self.requirecap('getbundle', _('look up remote changes'))
331 opts = {}
332 if heads is not None:
333 opts['heads'] = encodelist(heads)
334 if common is not None:
335 opts['common'] = encodelist(common)
336 if bundlecaps is not None:
337 opts['bundlecaps'] = ','.join(bundlecaps)
+338 opts.update(kwargs)
339 f = self._callcompressable("getbundle", **opts)
340 if bundlecaps is not None and 'HG2X' in bundlecaps:
341 return bundle2.unbundle20(self.ui, f)
342 else:
343 return changegroupmod.unbundle10(f, 'UN')
344
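This is the heart of the change: any extra keyword argument handed to the client-side getbundle is merged into the wire options and sent along unchanged. A sketch of the effect (the 'obsmarkers' argument is hypothetical, purely for illustration):

    # peer.getbundle('pull', heads=heads, obsmarkers='1')  # hypothetical arg
    # builds opts = {'heads': encodelist(heads), 'obsmarkers': '1'}
    # and issues _callcompressable("getbundle", heads=..., obsmarkers='1'),
    # so new getbundle arguments no longer require touching this method.
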
345 def unbundle(self, cg, heads, source):
346 '''Send cg (a readable file-like object representing the
347 changegroup to push, typically a chunkbuffer object) to the
348 remote server as a bundle.
349
350 When pushing a bundle10 stream, return an integer indicating the
351 result of the push (see localrepository.addchangegroup()).
352
353 When pushing a bundle20 stream, return a bundle20 stream.'''
354
355 if heads != ['force'] and self.capable('unbundlehash'):
356 heads = encodelist(['hashed',
357 util.sha1(''.join(sorted(heads))).digest()])
358 else:
359 heads = encodelist(heads)
360
361 if util.safehasattr(cg, 'deltaheader'):
362 # this is a bundle10, do the old style call sequence
363 ret, output = self._callpush("unbundle", cg, heads=heads)
364 if ret == "":
365 raise error.ResponseError(
366 _('push failed:'), output)
367 try:
368 ret = int(ret)
369 except ValueError:
370 raise error.ResponseError(
371 _('push failed (unexpected response):'), ret)
372
373 for l in output.splitlines(True):
374 self.ui.status(_('remote: '), l)
375 else:
376 # bundle2 push. Send a stream, fetch a stream.
377 stream = self._calltwowaystream('unbundle', cg, heads=heads)
378 ret = bundle2.unbundle20(self.ui, stream)
379 return ret
380
381 def debugwireargs(self, one, two, three=None, four=None, five=None):
382 # don't pass optional arguments left at their default value
383 opts = {}
384 if three is not None:
385 opts['three'] = three
386 if four is not None:
387 opts['four'] = four
388 return self._call('debugwireargs', one=one, two=two, **opts)
389
390 def _call(self, cmd, **args):
391 """execute <cmd> on the server
392
393 The command is expected to return a simple string.
394
395 returns the server reply as a string."""
396 raise NotImplementedError()
397
398 def _callstream(self, cmd, **args):
399 """execute <cmd> on the server
400
401 The command is expected to return a stream.
402
403 returns the server reply as a file like object."""
404 raise NotImplementedError()
405
406 def _callcompressable(self, cmd, **args):
407 """execute <cmd> on the server
408
409 The command is expected to return a stream.
410
411 The stream may have been compressed in some implementations. This
412 function takes care of the decompression. This is the only difference
413 with _callstream.
414
415 returns the server reply as a file like object.
416 """
417 raise NotImplementedError()
418
419 def _callpush(self, cmd, fp, **args):
420 """execute a <cmd> on server
421
422 The command is expected to be related to a push. Push has a special
423 return method.
424
425 returns the server reply as a (ret, output) tuple. ret is either
426 empty (error) or a stringified int.
427 """
428 raise NotImplementedError()
429
430 def _calltwowaystream(self, cmd, fp, **args):
431 """execute <cmd> on server
432
433 The command will send a stream to the server and get a stream in reply.
434 """
435 raise NotImplementedError()
436
437 def _abort(self, exception):
438 """clearly abort the wire protocol connection and raise the exception
439 """
440 raise NotImplementedError()
441
442 # server side
443
444 # a wire protocol command can either return a string or one of these classes.
445 class streamres(object):
446 """wireproto reply: binary stream
447
448 The call was successful and the result is a stream.
449 Iterate on the `self.gen` attribute to retrieve chunks.
450 """
451 def __init__(self, gen):
452 self.gen = gen
453
454 class pushres(object):
455 """wireproto reply: success with simple integer return
456
457 The call was successful and returned an integer contained in `self.res`.
458 """
459 def __init__(self, res):
460 self.res = res
461
462 class pusherr(object):
463 """wireproto reply: failure
464
465 The call failed. The `self.res` attribute contains the error message.
466 """
467 def __init__(self, res):
468 self.res = res
469
470 class ooberror(object):
471 """wireproto reply: failure of a batch of operations
472
473 Something failed during a batch call. The error message is stored in
474 `self.message`.
475 """
476 def __init__(self, message):
477 self.message = message
478
479 def dispatch(repo, proto, command):
480 repo = repo.filtered("served")
481 func, spec = commands[command]
482 args = proto.getargs(spec)
483 return func(repo, proto, *args)
484
485 def options(cmd, keys, others):
486 opts = {}
487 for k in keys:
488 if k in others:
489 opts[k] = others[k]
490 del others[k]
491 if others:
492 sys.stderr.write("abort: %s got unexpected arguments %s\n"
493 % (cmd, ",".join(others)))
494 return opts
495
496 # list of commands
497 commands = {}
498
499 def wireprotocommand(name, args=''):
500 """decorator for wire protocol command"""
501 def register(func):
502 commands[name] = (func, args)
503 return func
504 return register
505
506 @wireprotocommand('batch', 'cmds *')
507 def batch(repo, proto, cmds, others):
508 repo = repo.filtered("served")
509 res = []
510 for pair in cmds.split(';'):
511 op, args = pair.split(' ', 1)
512 vals = {}
513 for a in args.split(','):
514 if a:
515 n, v = a.split('=')
516 vals[n] = unescapearg(v)
517 func, spec = commands[op]
518 if spec:
519 keys = spec.split()
520 data = {}
521 for k in keys:
522 if k == '*':
523 star = {}
524 for key in vals.keys():
525 if key not in keys:
526 star[key] = vals[key]
527 data['*'] = star
528 else:
529 data[k] = vals[k]
530 result = func(repo, proto, *[data[k] for k in keys])
531 else:
532 result = func(repo, proto)
533 if isinstance(result, ooberror):
534 return result
535 res.append(escapearg(result))
536 return ';'.join(res)
537
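To make the parsing above concrete: a batched heads-plus-known request arrives as a single 'batch' command whose cmds argument packs one op per ';'. A hypothetical encoded request (node value is fabricated for illustration):

    # cmds = 'heads ;known nodes=1111111111111111111111111111111111111111'
    # batch() splits this into ('heads', '') and ('known', 'nodes=...'),
    # unescapes each value, dispatches both commands, and returns
    # 'escaped-result-1;escaped-result-2' to the client.
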
538 @wireprotocommand('between', 'pairs')
539 def between(repo, proto, pairs):
540 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
541 r = []
542 for b in repo.between(pairs):
543 r.append(encodelist(b) + "\n")
544 return "".join(r)
545
546 @wireprotocommand('branchmap')
547 def branchmap(repo, proto):
548 branchmap = repo.branchmap()
549 heads = []
550 for branch, nodes in branchmap.iteritems():
551 branchname = urllib.quote(encoding.fromlocal(branch))
552 branchnodes = encodelist(nodes)
553 heads.append('%s %s' % (branchname, branchnodes))
554 return '\n'.join(heads)
555
556 @wireprotocommand('branches', 'nodes')
557 def branches(repo, proto, nodes):
558 nodes = decodelist(nodes)
559 r = []
560 for b in repo.branches(nodes):
561 r.append(encodelist(b) + "\n")
562 return "".join(r)
563
564
565 wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
566 'known', 'getbundle', 'unbundlehash', 'batch']
567
568 def _capabilities(repo, proto):
569 """return a list of capabilities for a repo
570
571 This function exists to allow extensions to easily wrap capabilities
572 computation
573
574 - returns a list: easy to alter
575 - changes done here will be propagated to both the `capabilities` and
576 `hello` commands without any other action needed.
577 """
578 # copy to prevent modification of the global list
579 caps = list(wireprotocaps)
580 if _allowstream(repo.ui):
581 if repo.ui.configbool('server', 'preferuncompressed', False):
582 caps.append('stream-preferred')
583 requiredformats = repo.requirements & repo.supportedformats
584 # if our local revlogs are just revlogv1, add 'stream' cap
585 if not requiredformats - set(('revlogv1',)):
586 caps.append('stream')
587 # otherwise, add 'streamreqs' detailing our local revlog format
588 else:
589 caps.append('streamreqs=%s' % ','.join(requiredformats))
590 if repo.ui.configbool('experimental', 'bundle2-exp', False):
591 capsblob = bundle2.encodecaps(repo.bundle2caps)
592 caps.append('bundle2-exp=' + urllib.quote(capsblob))
593 caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
594 caps.append('httpheader=1024')
595 return caps
596
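As an illustration, a plain revlogv1 repository served with uncompressed clones allowed would advertise a capability line roughly like the following (the unbundle value comes from changegroup.bundlepriority; exact contents depend on configuration):

    lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch stream unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024
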
597 # If you are writing an extension and considering wrapping this function,
598 # wrap `_capabilities` instead.
599 @wireprotocommand('capabilities')
600 def capabilities(repo, proto):
601 return ' '.join(_capabilities(repo, proto))
602
603 @wireprotocommand('changegroup', 'roots')
604 def changegroup(repo, proto, roots):
605 nodes = decodelist(roots)
606 cg = changegroupmod.changegroup(repo, nodes, 'serve')
607 return streamres(proto.groupchunks(cg))
608
609 @wireprotocommand('changegroupsubset', 'bases heads')
610 def changegroupsubset(repo, proto, bases, heads):
611 bases = decodelist(bases)
612 heads = decodelist(heads)
613 cg = changegroupmod.changegroupsubset(repo, bases, heads, 'serve')
614 return streamres(proto.groupchunks(cg))
615
616 @wireprotocommand('debugwireargs', 'one two *')
617 def debugwireargs(repo, proto, one, two, others):
618 # only accept optional args from the known set
619 opts = options('debugwireargs', ['three', 'four'], others)
620 return repo.debugwireargs(one, two, **opts)
621
622 @wireprotocommand('getbundle', '*')
623 def getbundle(repo, proto, others):
624 opts = options('getbundle', ['heads', 'common', 'bundlecaps'], others)
625 for k, v in opts.iteritems():
626 if k in ('heads', 'common'):
627 opts[k] = decodelist(v)
628 elif k == 'bundlecaps':
629 opts[k] = set(v.split(','))
630 cg = exchange.getbundle(repo, 'serve', **opts)
631 return streamres(proto.groupchunks(cg))
632
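On the server side, the '*' argument spec already funnels any extra wire arguments into others; options() then keeps only the whitelisted keys and warns about the rest. A hypothetical sketch of what accepting a new argument would take (the 'obsmarkers' name is illustrative and not part of this changeset):

    # opts = options('getbundle',
    #                ['heads', 'common', 'bundlecaps', 'obsmarkers'],
    #                others)   # hypothetical: whitelist the new argument here
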
633 @wireprotocommand('heads')
634 def heads(repo, proto):
635 h = repo.heads()
636 return encodelist(h) + "\n"
637
638 @wireprotocommand('hello')
639 def hello(repo, proto):
640 '''the hello command returns a set of lines describing various
641 interesting things about the server, in an RFC822-like format.
642 Currently the only one defined is "capabilities", which
643 consists of a line in the form:
644
645 capabilities: space separated list of tokens
646 '''
647 return "capabilities: %s\n" % (capabilities(repo, proto))
648
649 @wireprotocommand('listkeys', 'namespace')
650 def listkeys(repo, proto, namespace):
651 d = repo.listkeys(encoding.tolocal(namespace)).items()
652 t = '\n'.join(['%s\t%s' % (encoding.fromlocal(k), encoding.fromlocal(v))
653 for k, v in d])
654 return t
655
656 @wireprotocommand('lookup', 'key')
657 def lookup(repo, proto, key):
658 try:
659 k = encoding.tolocal(key)
660 c = repo[k]
661 r = c.hex()
662 success = 1
663 except Exception, inst:
664 r = str(inst)
665 success = 0
666 return "%s %s\n" % (success, r)
667
668 @wireprotocommand('known', 'nodes *')
669 def known(repo, proto, nodes, others):
670 return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
671
672 @wireprotocommand('pushkey', 'namespace key old new')
673 def pushkey(repo, proto, namespace, key, old, new):
674 # compatibility with pre-1.8 clients which were accidentally
675 # sending raw binary nodes rather than utf-8-encoded hex
676 if len(new) == 20 and new.encode('string-escape') != new:
677 # looks like it could be a binary node
678 try:
679 new.decode('utf-8')
680 new = encoding.tolocal(new) # but cleanly decodes as UTF-8
681 except UnicodeDecodeError:
682 pass # binary, leave unmodified
683 else:
684 new = encoding.tolocal(new) # normal path
685
686 if util.safehasattr(proto, 'restore'):
687
688 proto.redirect()
689
690 try:
691 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
692 encoding.tolocal(old), new) or False
693 except util.Abort:
694 r = False
695
696 output = proto.restore()
697
698 return '%s\n%s' % (int(r), output)
699
700 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
701 encoding.tolocal(old), new)
702 return '%s\n' % int(r)
703
704 def _allowstream(ui):
705 return ui.configbool('server', 'uncompressed', True, untrusted=True)
706
707 def _walkstreamfiles(repo):
708 # this is its own function so extensions can override it
709 return repo.store.walk()
710
711 @wireprotocommand('stream_out')
712 def stream(repo, proto):
713 '''If the server supports streaming clone, it advertises the "stream"
714 capability with a value representing the version and flags of the repo
715 it is serving. Client checks to see if it understands the format.
716
717 The format is simple: the server writes out a line with the number
718 of files, then the total number of bytes to be transferred (separated
719 by a space). Then, for each file, the server first writes the filename
720 and file size (separated by the null character), then the file contents.
721 '''
722
723 if not _allowstream(repo.ui):
724 return '1\n'
725
726 entries = []
727 total_bytes = 0
728 try:
729 # get consistent snapshot of repo, lock during scan
730 lock = repo.lock()
731 try:
732 repo.ui.debug('scanning\n')
733 for name, ename, size in _walkstreamfiles(repo):
734 if size:
735 entries.append((name, size))
736 total_bytes += size
737 finally:
738 lock.release()
739 except error.LockError:
740 return '2\n' # error: 2
741
742 def streamer(repo, entries, total):
743 '''stream out all metadata files in repository.'''
744 yield '0\n' # success
745 repo.ui.debug('%d files, %d bytes to transfer\n' %
746 (len(entries), total_bytes))
747 yield '%d %d\n' % (len(entries), total_bytes)
748
749 sopener = repo.sopener
750 oldaudit = sopener.mustaudit
751 debugflag = repo.ui.debugflag
752 sopener.mustaudit = False
753
754 try:
755 for name, size in entries:
756 if debugflag:
757 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
758 # partially encode name over the wire for backwards compat
759 yield '%s\0%d\n' % (store.encodedir(name), size)
760 if size <= 65536:
761 fp = sopener(name)
762 try:
763 data = fp.read(size)
764 finally:
765 fp.close()
766 yield data
767 else:
768 for chunk in util.filechunkiter(sopener(name), limit=size):
769 yield chunk
770 # replace with "finally:" when support for python 2.4 has been dropped
771 except Exception:
772 sopener.mustaudit = oldaudit
773 raise
774 sopener.mustaudit = oldaudit
775
776 return streamres(streamer(repo, entries, total_bytes))
777
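A client consuming this stream therefore reads one status line, one 'count total' line, then a filename/size header followed by raw contents for each file. A compact reader sketch, assuming fp wraps the response and supports readline()/read(); error handling is omitted and the function is illustrative only:

    status = fp.readline()                    # '0\n' on success
    if status.strip() == '0':
        filecount, totalbytes = map(int, fp.readline().split(' ', 1))
        for i in xrange(filecount):
            name, size = fp.readline()[:-1].rsplit('\0', 1)
            data = fp.read(int(size))         # raw contents follow the header
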
778 @wireprotocommand('unbundle', 'heads')
779 def unbundle(repo, proto, heads):
780 their_heads = decodelist(heads)
781
782 try:
783 proto.redirect()
784
785 exchange.check_heads(repo, their_heads, 'preparing changes')
786
787 # write bundle data to temporary file because it can be big
788 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
789 fp = os.fdopen(fd, 'wb+')
790 r = 0
791 try:
792 proto.getfile(fp)
793 fp.seek(0)
794 gen = exchange.readbundle(repo.ui, fp, None)
795 r = exchange.unbundle(repo, gen, their_heads, 'serve',
796 proto._client())
797 if util.safehasattr(r, 'addpart'):
798 # The return looks streamable; we are in the bundle2 case and
799 # should return a stream.
800 return streamres(r.getchunks())
801 return pushres(r)
802
803 finally:
804 fp.close()
805 os.unlink(tempname)
806 except util.Abort, inst:
807 # The old code we moved used sys.stderr directly.
808 # We did not change it to minimise code change.
809 # This needs to be moved to something proper.
810 # Feel free to do it.
811 sys.stderr.write("abort: %s\n" % inst)
812 return pushres(0)
813 except exchange.PushRaced, exc:
814 return pusherr(str(exc))