bundle2: advertise bundle2 caps in server capabilities...

Pierre-Yves David
r21141:d8dd19e0 default
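With this change the server stops advertising a bare `bundle2` capability token and instead advertises `bundle2=<blob>`, where `<blob>` is its encoded bundle2 capability set, URL-quoted so it survives as a single token in the space-separated capabilities string. The client half is visible in `_pushbundle2()` below: `remote.capable('bundle2')` now returns that blob, which is unquoted and decoded before seeding the bundler. A toy sketch of the round trip follows; the encode/decode helpers here are illustrative stand-ins, not the real wire format (the real codec is `bundle2.decodecaps`, visible in the hunk below, plus a matching encoder assumed on the server side):

    import urllib

    def encodecaps(caps):
        # toy stand-in for the real bundle2 encoder: one capability per
        # line, values comma-separated after '=' (illustrative format only)
        lines = []
        for name in sorted(caps):
            values = caps[name]
            if values:
                lines.append('%s=%s' % (name, ','.join(values)))
            else:
                lines.append(name)
        return '\n'.join(lines)

    def decodecaps(blob):
        # inverse of encodecaps, mirroring what bundle2.decodecaps is for
        caps = {}
        for line in blob.splitlines():
            if '=' in line:
                name, vals = line.split('=', 1)
                caps[name] = tuple(vals.split(','))
            else:
                caps[line] = ()
        return caps

    # server side: URL-quote the blob so it stays a single token in the
    # space-separated capabilities string
    advertised = 'bundle2=' + urllib.quote(encodecaps({'HG20': ()}))

    # client side, as in the _pushbundle2() hunk below
    name, quoted = advertised.split('=', 1)
    assert decodecaps(urllib.unquote(quoted)) == {'HG20': ()}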
mercurial/exchange.py
@@ -1,706 +1,708 @@
 # exchange.py - utility to exchange data between repos.
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from i18n import _
 from node import hex, nullid
-import errno
+import errno, urllib
 import util, scmutil, changegroup, base85
 import discovery, phases, obsolete, bookmarks, bundle2

 def readbundle(ui, fh, fname, vfs=None):
     header = changegroup.readexactly(fh, 4)

     alg = None
     if not fname:
         fname = "stream"
         if not header.startswith('HG') and header.startswith('\0'):
             fh = changegroup.headerlessfixup(fh, header)
             header = "HG10"
             alg = 'UN'
     elif vfs:
         fname = vfs.join(fname)

     magic, version = header[0:2], header[2:4]

     if magic != 'HG':
         raise util.Abort(_('%s: not a Mercurial bundle') % fname)
     if version == '10':
         if alg is None:
             alg = changegroup.readexactly(fh, 2)
         return changegroup.unbundle10(fh, alg)
     elif version == '20':
         return bundle2.unbundle20(ui, fh, header=magic + version)
     else:
         raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))


 class pushoperation(object):
     """A object that represent a single push operation

     It purpose is to carry push related state and very common operation.

     A new should be created at the beginning of each push and discarded
     afterward.
     """

     def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
         # repo we push from
         self.repo = repo
         self.ui = repo.ui
         # repo we push to
         self.remote = remote
         # force option provided
         self.force = force
         # revs to be pushed (None is "all")
         self.revs = revs
         # allow push of new branch
         self.newbranch = newbranch
         # did a local lock get acquired?
         self.locallocked = None
         # Integer version of the push result
         # - None means nothing to push
         # - 0 means HTTP error
         # - 1 means we pushed and remote head count is unchanged *or*
         #   we have outgoing changesets but refused to push
         # - other values as described by addchangegroup()
         self.ret = None
         # discover.outgoing object (contains common and outgoing data)
         self.outgoing = None
         # all remote heads before the push
         self.remoteheads = None
         # testable as a boolean indicating if any nodes are missing locally.
         self.incoming = None
         # set of all heads common after changeset bundle push
         self.commonheads = None

 def push(repo, remote, force=False, revs=None, newbranch=False):
     '''Push outgoing changesets (limited by revs) from a local
     repository to remote. Return an integer:
       - None means nothing to push
       - 0 means HTTP error
       - 1 means we pushed and remote head count is unchanged *or*
         we have outgoing changesets but refused to push
       - other values as described by addchangegroup()
     '''
     pushop = pushoperation(repo, remote, force, revs, newbranch)
     if pushop.remote.local():
         missing = (set(pushop.repo.requirements)
                    - pushop.remote.local().supported)
         if missing:
             msg = _("required features are not"
                     " supported in the destination:"
                     " %s") % (', '.join(sorted(missing)))
             raise util.Abort(msg)

     # there are two ways to push to remote repo:
     #
     # addchangegroup assumes local user can lock remote
     # repo (local filesystem, old ssh servers).
     #
     # unbundle assumes local user cannot lock remote repo (new ssh
     # servers, http servers).

     if not pushop.remote.canpush():
         raise util.Abort(_("destination does not support push"))
     # get local lock as we might write phase data
     locallock = None
     try:
         locallock = pushop.repo.lock()
         pushop.locallocked = True
     except IOError, err:
         pushop.locallocked = False
         if err.errno != errno.EACCES:
             raise
         # source repo cannot be locked.
         # We do not abort the push, but just disable the local phase
         # synchronisation.
         msg = 'cannot lock source repository: %s\n' % err
         pushop.ui.debug(msg)
     try:
         pushop.repo.checkpush(pushop)
         lock = None
         unbundle = pushop.remote.capable('unbundle')
         if not unbundle:
             lock = pushop.remote.lock()
         try:
             _pushdiscovery(pushop)
             if _pushcheckoutgoing(pushop):
                 pushop.repo.prepushoutgoinghooks(pushop.repo,
                                                  pushop.remote,
                                                  pushop.outgoing)
                 if pushop.remote.capable('bundle2'):
                     _pushbundle2(pushop)
                 else:
                     _pushchangeset(pushop)
             _pushcomputecommonheads(pushop)
             _pushsyncphase(pushop)
             _pushobsolete(pushop)
         finally:
             if lock is not None:
                 lock.release()
     finally:
         if locallock is not None:
             locallock.release()

     _pushbookmark(pushop)
     return pushop.ret

 def _pushdiscovery(pushop):
     # discovery
     unfi = pushop.repo.unfiltered()
     fci = discovery.findcommonincoming
     commoninc = fci(unfi, pushop.remote, force=pushop.force)
     common, inc, remoteheads = commoninc
     fco = discovery.findcommonoutgoing
     outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                    commoninc=commoninc, force=pushop.force)
     pushop.outgoing = outgoing
     pushop.remoteheads = remoteheads
     pushop.incoming = inc

 def _pushcheckoutgoing(pushop):
     outgoing = pushop.outgoing
     unfi = pushop.repo.unfiltered()
     if not outgoing.missing:
         # nothing to push
         scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
         return False
     # something to push
     if not pushop.force:
         # if repo.obsstore == False --> no obsolete
         # then, save the iteration
         if unfi.obsstore:
             # this message are here for 80 char limit reason
             mso = _("push includes obsolete changeset: %s!")
             mst = "push includes %s changeset: %s!"
             # plain versions for i18n tool to detect them
             _("push includes unstable changeset: %s!")
             _("push includes bumped changeset: %s!")
             _("push includes divergent changeset: %s!")
             # If we are to push if there is at least one
             # obsolete or unstable changeset in missing, at
             # least one of the missinghead will be obsolete or
             # unstable. So checking heads only is ok
             for node in outgoing.missingheads:
                 ctx = unfi[node]
                 if ctx.obsolete():
                     raise util.Abort(mso % ctx)
                 elif ctx.troubled():
                     raise util.Abort(_(mst)
                                      % (ctx.troubles()[0],
                                         ctx))
         newbm = pushop.ui.configlist('bookmarks', 'pushing')
         discovery.checkheads(unfi, pushop.remote, outgoing,
                              pushop.remoteheads,
                              pushop.newbranch,
                              bool(pushop.incoming),
                              newbm)
     return True

 def _pushbundle2(pushop):
     """push data to the remote using bundle2

     The only currently supported type of data is changegroup but this will
     evolve in the future."""
     # Send known head to the server for race detection.
-    bundler = bundle2.bundle20(pushop.ui)
+    capsblob = urllib.unquote(pushop.remote.capable('bundle2'))
+    caps = bundle2.decodecaps(capsblob)
+    bundler = bundle2.bundle20(pushop.ui, caps)
     bundler.addpart(bundle2.bundlepart('replycaps'))
     if not pushop.force:
         part = bundle2.bundlepart('CHECK:HEADS', data=iter(pushop.remoteheads))
         bundler.addpart(part)
     # add the changegroup bundle
     cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
     cgpart = bundle2.bundlepart('CHANGEGROUP', data=cg.getchunks())
     bundler.addpart(cgpart)
     stream = util.chunkbuffer(bundler.getchunks())
     reply = pushop.remote.unbundle(stream, ['force'], 'push')
     try:
         op = bundle2.processbundle(pushop.repo, reply)
     except KeyError, exc:
         raise util.Abort('missing support for %s' % exc)
     cgreplies = op.records.getreplies(cgpart.id)
     assert len(cgreplies['changegroup']) == 1
     pushop.ret = cgreplies['changegroup'][0]['return']

 def _pushchangeset(pushop):
     """Make the actual push of changeset bundle to remote repo"""
     outgoing = pushop.outgoing
     unbundle = pushop.remote.capable('unbundle')
     # TODO: get bundlecaps from remote
     bundlecaps = None
     # create a changegroup from local
     if pushop.revs is None and not (outgoing.excluded
                                     or pushop.repo.changelog.filteredrevs):
         # push everything,
         # use the fast path, no race possible on push
         bundler = changegroup.bundle10(pushop.repo, bundlecaps)
         cg = changegroup.getsubset(pushop.repo,
                                    outgoing,
                                    bundler,
                                    'push',
                                    fastpath=True)
     else:
         cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                         bundlecaps)

     # apply changegroup to remote
     if unbundle:
         # local repo finds heads on server, finds out what
         # revs it must push. once revs transferred, if server
         # finds it has different heads (someone else won
         # commit/push race), server aborts.
         if pushop.force:
             remoteheads = ['force']
         else:
             remoteheads = pushop.remoteheads
         # ssh: return remote's addchangegroup()
         # http: return remote's addchangegroup() or 0 for error
         pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                             'push')
     else:
         # we return an integer indicating remote head count
         # change
         pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())

 def _pushcomputecommonheads(pushop):
     unfi = pushop.repo.unfiltered()
     if pushop.ret:
         # push succeed, synchronize target of the push
         cheads = pushop.outgoing.missingheads
     elif pushop.revs is None:
         # All out push fails. synchronize all common
         cheads = pushop.outgoing.commonheads
     else:
         # I want cheads = heads(::missingheads and ::commonheads)
         # (missingheads is revs with secret changeset filtered out)
         #
         # This can be expressed as:
         #     cheads = ( (missingheads and ::commonheads)
         #              + (commonheads and ::missingheads))"
         #              )
         #
         # while trying to push we already computed the following:
         #     common = (::commonheads)
         #     missing = ((commonheads::missingheads) - commonheads)
         #
         # We can pick:
         # * missingheads part of common (::commonheads)
         common = set(pushop.outgoing.common)
         nm = pushop.repo.changelog.nodemap
         cheads = [node for node in pushop.revs if nm[node] in common]
         # and
         # * commonheads parents on missing
         revset = unfi.set('%ln and parents(roots(%ln))',
                           pushop.outgoing.commonheads,
                           pushop.outgoing.missing)
         cheads.extend(c.node() for c in revset)
     pushop.commonheads = cheads

 def _pushsyncphase(pushop):
     """synchronise phase information locally and remotely"""
     unfi = pushop.repo.unfiltered()
     cheads = pushop.commonheads
     if pushop.ret:
         # push succeed, synchronize target of the push
         cheads = pushop.outgoing.missingheads
     elif pushop.revs is None:
         # All out push fails. synchronize all common
         cheads = pushop.outgoing.commonheads
     else:
         # I want cheads = heads(::missingheads and ::commonheads)
         # (missingheads is revs with secret changeset filtered out)
         #
         # This can be expressed as:
         #     cheads = ( (missingheads and ::commonheads)
         #              + (commonheads and ::missingheads))"
         #              )
         #
         # while trying to push we already computed the following:
         #     common = (::commonheads)
         #     missing = ((commonheads::missingheads) - commonheads)
         #
         # We can pick:
         # * missingheads part of common (::commonheads)
         common = set(pushop.outgoing.common)
         nm = pushop.repo.changelog.nodemap
         cheads = [node for node in pushop.revs if nm[node] in common]
         # and
         # * commonheads parents on missing
         revset = unfi.set('%ln and parents(roots(%ln))',
                           pushop.outgoing.commonheads,
                           pushop.outgoing.missing)
         cheads.extend(c.node() for c in revset)
     pushop.commonheads = cheads
     # even when we don't push, exchanging phase data is useful
     remotephases = pushop.remote.listkeys('phases')
     if (pushop.ui.configbool('ui', '_usedassubrepo', False)
         and remotephases    # server supports phases
         and pushop.ret is None # nothing was pushed
         and remotephases.get('publishing', False)):
         # When:
         # - this is a subrepo push
         # - and remote support phase
         # - and no changeset was pushed
         # - and remote is publishing
         # We may be in issue 3871 case!
         # We drop the possible phase synchronisation done by
         # courtesy to publish changesets possibly locally draft
         # on the remote.
         remotephases = {'publishing': 'True'}
     if not remotephases: # old server or public only reply from non-publishing
         _localphasemove(pushop, cheads)
         # don't push any phase data as there is nothing to push
     else:
         ana = phases.analyzeremotephases(pushop.repo, cheads,
                                          remotephases)
         pheads, droots = ana
         ### Apply remote phase on local
         if remotephases.get('publishing', False):
             _localphasemove(pushop, cheads)
         else: # publish = False
             _localphasemove(pushop, pheads)
             _localphasemove(pushop, cheads, phases.draft)
         ### Apply local phase on remote

         # Get the list of all revs draft on remote by public here.
         # XXX Beware that revset break if droots is not strictly
         # XXX root we may want to ensure it is but it is costly
         outdated = unfi.set('heads((%ln::%ln) and public())',
                             droots, cheads)
         for newremotehead in outdated:
             r = pushop.remote.pushkey('phases',
                                       newremotehead.hex(),
                                       str(phases.draft),
                                       str(phases.public))
             if not r:
                 pushop.ui.warn(_('updating %s to public failed!\n')
                                % newremotehead)

 def _localphasemove(pushop, nodes, phase=phases.public):
     """move <nodes> to <phase> in the local source repo"""
     if pushop.locallocked:
         phases.advanceboundary(pushop.repo, phase, nodes)
     else:
         # repo is not locked, do not change any phases!
         # Informs the user that phases should have been moved when
         # applicable.
         actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
         phasestr = phases.phasenames[phase]
         if actualmoves:
             pushop.ui.status(_('cannot lock source repo, skipping '
                                'local %s phase update\n') % phasestr)

 def _pushobsolete(pushop):
     """utility function to push obsolete markers to a remote"""
     pushop.ui.debug('try to push obsolete markers to remote\n')
     repo = pushop.repo
     remote = pushop.remote
     if (obsolete._enabled and repo.obsstore and
         'obsolete' in remote.listkeys('namespaces')):
         rslts = []
         remotedata = repo.listkeys('obsolete')
         for key in sorted(remotedata, reverse=True):
             # reverse sort to ensure we end with dump0
             data = remotedata[key]
             rslts.append(remote.pushkey('obsolete', key, '', data))
         if [r for r in rslts if not r]:
             msg = _('failed to push some obsolete markers!\n')
             repo.ui.warn(msg)

 def _pushbookmark(pushop):
     """Update bookmark position on remote"""
     ui = pushop.ui
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     ui.debug("checking for updated bookmarks\n")
     revnums = map(repo.changelog.rev, pushop.revs or [])
     ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
     (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
      ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                            srchex=hex)

     for b, scid, dcid in advsrc:
         if ancestors and repo[scid].rev() not in ancestors:
             continue
         if remote.pushkey('bookmarks', b, dcid, scid):
             ui.status(_("updating bookmark %s\n") % b)
         else:
             ui.warn(_('updating bookmark %s failed!\n') % b)

 class pulloperation(object):
     """A object that represent a single pull operation

     It purpose is to carry push related state and very common operation.

     A new should be created at the beginning of each pull and discarded
     afterward.
     """

     def __init__(self, repo, remote, heads=None, force=False):
         # repo we pull into
         self.repo = repo
         # repo we pull from
         self.remote = remote
         # revision we try to pull (None is "all")
         self.heads = heads
         # do we force pull?
         self.force = force
         # the name the pull transaction
         self._trname = 'pull\n' + util.hidepassword(remote.url())
         # hold the transaction once created
         self._tr = None
         # set of common changeset between local and remote before pull
         self.common = None
         # set of pulled head
         self.rheads = None
         # list of missing changeset to fetch remotely
         self.fetch = None
         # result of changegroup pulling (used as return code by pull)
         self.cgresult = None
         # list of step remaining todo (related to future bundle2 usage)
         self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

     @util.propertycache
     def pulledsubset(self):
         """heads of the set of changeset target by the pull"""
         # compute target subset
         if self.heads is None:
             # We pulled every thing possible
             # sync on everything common
             c = set(self.common)
             ret = list(self.common)
             for n in self.rheads:
                 if n not in c:
                     ret.append(n)
             return ret
         else:
             # We pulled a specific subset
             # sync on this subset
             return self.heads

     def gettransaction(self):
         """get appropriate pull transaction, creating it if needed"""
         if self._tr is None:
             self._tr = self.repo.transaction(self._trname)
         return self._tr

     def closetransaction(self):
         """close transaction if created"""
         if self._tr is not None:
             self._tr.close()

     def releasetransaction(self):
         """release transaction if created"""
         if self._tr is not None:
             self._tr.release()

 def pull(repo, remote, heads=None, force=False):
     pullop = pulloperation(repo, remote, heads, force)
     if pullop.remote.local():
         missing = set(pullop.remote.requirements) - pullop.repo.supported
         if missing:
             msg = _("required features are not"
                     " supported in the destination:"
                     " %s") % (', '.join(sorted(missing)))
             raise util.Abort(msg)

     lock = pullop.repo.lock()
     try:
         _pulldiscovery(pullop)
         if pullop.remote.capable('bundle2'):
             _pullbundle2(pullop)
         if 'changegroup' in pullop.todosteps:
             _pullchangeset(pullop)
         if 'phases' in pullop.todosteps:
             _pullphase(pullop)
         if 'obsmarkers' in pullop.todosteps:
             _pullobsolete(pullop)
         pullop.closetransaction()
     finally:
         pullop.releasetransaction()
         lock.release()

     return pullop.cgresult

 def _pulldiscovery(pullop):
     """discovery phase for the pull

     Current handle changeset discovery only, will change handle all discovery
     at some point."""
     tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
                                        pullop.remote,
                                        heads=pullop.heads,
                                        force=pullop.force)
     pullop.common, pullop.fetch, pullop.rheads = tmp

 def _pullbundle2(pullop):
     """pull data using bundle2

     For now, the only supported data are changegroup."""
     kwargs = {'bundlecaps': set(['HG20'])}
     # pulling changegroup
     pullop.todosteps.remove('changegroup')
     if not pullop.fetch:
         pullop.repo.ui.status(_("no changes found\n"))
         pullop.cgresult = 0
     else:
         kwargs['common'] = pullop.common
         kwargs['heads'] = pullop.heads or pullop.rheads
         if pullop.heads is None and list(pullop.common) == [nullid]:
             pullop.repo.ui.status(_("requesting all changes\n"))
     if kwargs.keys() == ['format']:
         return # nothing to pull
     bundle = pullop.remote.getbundle('pull', **kwargs)
     try:
         op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
     except KeyError, exc:
         raise util.Abort('missing support for %s' % exc)
     assert len(op.records['changegroup']) == 1
     pullop.cgresult = op.records['changegroup'][0]['return']

 def _pullchangeset(pullop):
     """pull changeset from unbundle into the local repo"""
     # We delay the open of the transaction as late as possible so we
     # don't open transaction for nothing or you break future useful
     # rollback call
     pullop.todosteps.remove('changegroup')
     if not pullop.fetch:
         pullop.repo.ui.status(_("no changes found\n"))
         pullop.cgresult = 0
         return
     pullop.gettransaction()
     if pullop.heads is None and list(pullop.common) == [nullid]:
         pullop.repo.ui.status(_("requesting all changes\n"))
     elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
         # issue1320, avoid a race if remote changed after discovery
         pullop.heads = pullop.rheads

     if pullop.remote.capable('getbundle'):
         # TODO: get bundlecaps from remote
         cg = pullop.remote.getbundle('pull', common=pullop.common,
                                      heads=pullop.heads or pullop.rheads)
     elif pullop.heads is None:
         cg = pullop.remote.changegroup(pullop.fetch, 'pull')
     elif not pullop.remote.capable('changegroupsubset'):
         raise util.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
     else:
         cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
     pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                  pullop.remote.url())

 def _pullphase(pullop):
     # Get remote phases data from remote
     pullop.todosteps.remove('phases')
     remotephases = pullop.remote.listkeys('phases')
     publishing = bool(remotephases.get('publishing', False))
     if remotephases and not publishing:
         # remote is new and unpublishing
         pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                  pullop.pulledsubset,
                                                  remotephases)
         phases.advanceboundary(pullop.repo, phases.public, pheads)
         phases.advanceboundary(pullop.repo, phases.draft,
                                pullop.pulledsubset)
     else:
         # Remote is old or publishing all common changesets
         # should be seen as public
         phases.advanceboundary(pullop.repo, phases.public,
                                pullop.pulledsubset)

 def _pullobsolete(pullop):
     """utility function to pull obsolete markers from a remote

     The `gettransaction` is function that return the pull transaction, creating
     one if necessary. We return the transaction to inform the calling code that
     a new transaction have been created (when applicable).

     Exists mostly to allow overriding for experimentation purpose"""
     pullop.todosteps.remove('obsmarkers')
     tr = None
     if obsolete._enabled:
         pullop.repo.ui.debug('fetching remote obsolete markers\n')
         remoteobs = pullop.remote.listkeys('obsolete')
         if 'dump0' in remoteobs:
             tr = pullop.gettransaction()
             for key in sorted(remoteobs, reverse=True):
                 if key.startswith('dump'):
                     data = base85.b85decode(remoteobs[key])
                     pullop.repo.obsstore.mergemarkers(tr, data)
             pullop.repo.invalidatevolatilesets()
     return tr

 def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
     """return a full bundle (with potentially multiple kind of parts)

     Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
     passed. For now, the bundle can contain only changegroup, but this will
     changes when more part type will be available for bundle2.

     This is different from changegroup.getbundle that only returns an HG10
     changegroup bundle. They may eventually get reunited in the future when we
     have a clearer idea of the API we what to query different data.

     The implementation is at a very early stage and will get massive rework
     when the API of bundle is refined.
     """
     # build bundle here.
     cg = changegroup.getbundle(repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps)
     if bundlecaps is None or 'HG20' not in bundlecaps:
         return cg
     # very crude first implementation,
     # the bundle API will change and the generation will be done lazily.
     bundler = bundle2.bundle20(repo.ui)
     part = bundle2.bundlepart('changegroup', data=cg.getchunks())
     bundler.addpart(part)
     return util.chunkbuffer(bundler.getchunks())

 class PushRaced(RuntimeError):
     """An exception raised during unbundling that indicate a push race"""

 def check_heads(repo, their_heads, context):
     """check if the heads of a repo have been modified

     Used by peer for unbundling.
     """
     heads = repo.heads()
     heads_hash = util.sha1(''.join(sorted(heads))).digest()
     if not (their_heads == ['force'] or their_heads == heads or
             their_heads == ['hashed', heads_hash]):
         # someone else committed/pushed/unbundled while we
         # were transferring data
         raise PushRaced('repository changed while %s - '
                         'please try again' % context)

 def unbundle(repo, cg, heads, source, url):
     """Apply a bundle to a repo.

     this function makes sure the repo is locked during the application and have
     mechanism to check that no push race occurred between the creation of the
     bundle and its application.

     If the push was raced as PushRaced exception is raised."""
     r = 0
     # need a transaction when processing a bundle2 stream
     tr = None
     lock = repo.lock()
     try:
         check_heads(repo, heads, 'uploading changes')
         # push can proceed
         if util.safehasattr(cg, 'params'):
             tr = repo.transaction('unbundle')
             r = bundle2.processbundle(repo, cg, lambda: tr).reply
             tr.close()
         else:
             r = changegroup.addchangegroup(repo, cg, source, url)
     finally:
         if tr is not None:
             tr.release()
         lock.release()
     return r
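The client hunk above also leans on a wire-protocol convention: `peer.capable(name)` returns `True` for a bare capability token but returns the value string for a `name=value` token, which is what lets `capable('bundle2')` hand back the quoted blob instead of a boolean. A minimal sketch of that lookup, assuming a space-separated capabilities string (the helper name and sample string are illustrative):

    def capable(capsstring, name):
        # mimics the peer.capable() convention: True for a bare token,
        # the value for a 'name=value' token, False when absent
        for token in capsstring.split():
            if token == name:
                return True
            if token.startswith(name + '='):
                return token.split('=', 1)[1]
        return False

    caps = 'lookup branchmap unbundle bundle2=HG20'
    assert capable(caps, 'unbundle') is True
    assert capable(caps, 'bundle2') == 'HG20'
    assert capable(caps, 'obsolete') is False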
mercurial/localrepo.py
@@ -1,1908 +1,1910 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import lock as lockmod
12 import lock as lockmod
12 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
13 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
15 import match as matchmod
15 import merge as mergemod
16 import merge as mergemod
16 import tags as tagsmod
17 import tags as tagsmod
17 from lock import release
18 from lock import release
18 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
19 import branchmap, pathutil
20 import branchmap, pathutil
20 propertycache = util.propertycache
21 propertycache = util.propertycache
21 filecache = scmutil.filecache
22 filecache = scmutil.filecache
22
23
23 class repofilecache(filecache):
24 class repofilecache(filecache):
24 """All filecache usage on repo are done for logic that should be unfiltered
25 """All filecache usage on repo are done for logic that should be unfiltered
25 """
26 """
26
27
27 def __get__(self, repo, type=None):
28 def __get__(self, repo, type=None):
28 return super(repofilecache, self).__get__(repo.unfiltered(), type)
29 return super(repofilecache, self).__get__(repo.unfiltered(), type)
29 def __set__(self, repo, value):
30 def __set__(self, repo, value):
30 return super(repofilecache, self).__set__(repo.unfiltered(), value)
31 return super(repofilecache, self).__set__(repo.unfiltered(), value)
31 def __delete__(self, repo):
32 def __delete__(self, repo):
32 return super(repofilecache, self).__delete__(repo.unfiltered())
33 return super(repofilecache, self).__delete__(repo.unfiltered())
33
34
34 class storecache(repofilecache):
35 class storecache(repofilecache):
35 """filecache for files in the store"""
36 """filecache for files in the store"""
36 def join(self, obj, fname):
37 def join(self, obj, fname):
37 return obj.sjoin(fname)
38 return obj.sjoin(fname)
38
39
39 class unfilteredpropertycache(propertycache):
40 class unfilteredpropertycache(propertycache):
40 """propertycache that apply to unfiltered repo only"""
41 """propertycache that apply to unfiltered repo only"""
41
42
42 def __get__(self, repo, type=None):
43 def __get__(self, repo, type=None):
43 unfi = repo.unfiltered()
44 unfi = repo.unfiltered()
44 if unfi is repo:
45 if unfi is repo:
45 return super(unfilteredpropertycache, self).__get__(unfi)
46 return super(unfilteredpropertycache, self).__get__(unfi)
46 return getattr(unfi, self.name)
47 return getattr(unfi, self.name)
47
48
48 class filteredpropertycache(propertycache):
49 class filteredpropertycache(propertycache):
49 """propertycache that must take filtering in account"""
50 """propertycache that must take filtering in account"""
50
51
51 def cachevalue(self, obj, value):
52 def cachevalue(self, obj, value):
52 object.__setattr__(obj, self.name, value)
53 object.__setattr__(obj, self.name, value)
53
54
54
55
55 def hasunfilteredcache(repo, name):
56 def hasunfilteredcache(repo, name):
56 """check if a repo has an unfilteredpropertycache value for <name>"""
57 """check if a repo has an unfilteredpropertycache value for <name>"""
57 return name in vars(repo.unfiltered())
58 return name in vars(repo.unfiltered())
58
59
59 def unfilteredmethod(orig):
60 def unfilteredmethod(orig):
60 """decorate method that always need to be run on unfiltered version"""
61 """decorate method that always need to be run on unfiltered version"""
61 def wrapper(repo, *args, **kwargs):
62 def wrapper(repo, *args, **kwargs):
62 return orig(repo.unfiltered(), *args, **kwargs)
63 return orig(repo.unfiltered(), *args, **kwargs)
63 return wrapper
64 return wrapper
64
65
65 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
66 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
66 'bundle2', 'unbundle'))
67 'unbundle'))
67 legacycaps = moderncaps.union(set(['changegroupsubset']))
68 legacycaps = moderncaps.union(set(['changegroupsubset']))
68
69
69 class localpeer(peer.peerrepository):
70 class localpeer(peer.peerrepository):
70 '''peer for a local repo; reflects only the most recent API'''
71 '''peer for a local repo; reflects only the most recent API'''
71
72
72 def __init__(self, repo, caps=moderncaps):
73 def __init__(self, repo, caps=moderncaps):
73 peer.peerrepository.__init__(self)
74 peer.peerrepository.__init__(self)
74 self._repo = repo.filtered('served')
75 self._repo = repo.filtered('served')
75 self.ui = repo.ui
76 self.ui = repo.ui
76 self._caps = repo._restrictcapabilities(caps)
77 self._caps = repo._restrictcapabilities(caps)
77 self.requirements = repo.requirements
78 self.requirements = repo.requirements
78 self.supportedformats = repo.supportedformats
79 self.supportedformats = repo.supportedformats
79
80
80 def close(self):
81 def close(self):
81 self._repo.close()
82 self._repo.close()
82
83
83 def _capabilities(self):
84 def _capabilities(self):
84 return self._caps
85 return self._caps
85
86
86 def local(self):
87 def local(self):
87 return self._repo
88 return self._repo
88
89
89 def canpush(self):
90 def canpush(self):
90 return True
91 return True
91
92
92 def url(self):
93 def url(self):
93 return self._repo.url()
94 return self._repo.url()
94
95
95 def lookup(self, key):
96 def lookup(self, key):
96 return self._repo.lookup(key)
97 return self._repo.lookup(key)
97
98
98 def branchmap(self):
99 def branchmap(self):
99 return self._repo.branchmap()
100 return self._repo.branchmap()
100
101
101 def heads(self):
102 def heads(self):
102 return self._repo.heads()
103 return self._repo.heads()
103
104
104 def known(self, nodes):
105 def known(self, nodes):
105 return self._repo.known(nodes)
106 return self._repo.known(nodes)
106
107
107 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
108 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
108 format='HG10'):
109 format='HG10'):
109 cg = exchange.getbundle(self._repo, source, heads=heads,
110 cg = exchange.getbundle(self._repo, source, heads=heads,
110 common=common, bundlecaps=bundlecaps)
111 common=common, bundlecaps=bundlecaps)
111 if bundlecaps is not None and 'HG20' in bundlecaps:
112 if bundlecaps is not None and 'HG20' in bundlecaps:
112 # When requesting a bundle2, getbundle returns a stream to make the
113 # When requesting a bundle2, getbundle returns a stream to make the
113 # wire-level function happier. We need to build a proper object
114 # wire-level function happier. We need to build a proper object
114 # from it in the local peer.
115 # from it in the local peer.
115 cg = bundle2.unbundle20(self.ui, cg)
116 cg = bundle2.unbundle20(self.ui, cg)
116 return cg
117 return cg
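# Editor's note: hedged usage sketch for the branch above (caller code is
# assumed, not taken from this file): a local caller opts into bundle2 by
# putting 'HG20' in bundlecaps and gets back a parsed unbundle20 object
# rather than a raw changegroup stream.
#
#     peer = repo.peer()
#     b2 = peer.getbundle('pull', bundlecaps=set(['HG20']))
#     # b2 is a bundle2.unbundle20 instance, ready for part processing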
117
118
118 # TODO We might want to move the next two calls into legacypeer and add
119 # TODO We might want to move the next two calls into legacypeer and add
119 # unbundle instead.
120 # unbundle instead.
120
121
121 def unbundle(self, cg, heads, url):
122 def unbundle(self, cg, heads, url):
122 """apply a bundle on a repo
123 """apply a bundle on a repo
123
124
124 This function handles the repo locking itself."""
125 This function handles the repo locking itself."""
125 try:
126 try:
126 cg = exchange.readbundle(self.ui, cg, None)
127 cg = exchange.readbundle(self.ui, cg, None)
127 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
128 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
128 if util.safehasattr(ret, 'getchunks'):
129 if util.safehasattr(ret, 'getchunks'):
129 # This is a bundle20 object, turn it into an unbundler.
130 # This is a bundle20 object, turn it into an unbundler.
130 # This little dance should be dropped eventually when the API
131 # This little dance should be dropped eventually when the API
131 # is finally improved.
132 # is finally improved.
132 stream = util.chunkbuffer(ret.getchunks())
133 stream = util.chunkbuffer(ret.getchunks())
133 ret = bundle2.unbundle20(self.ui, stream)
134 ret = bundle2.unbundle20(self.ui, stream)
134 return ret
135 return ret
135 except exchange.PushRaced, exc:
136 except exchange.PushRaced, exc:
136 raise error.ResponseError(_('push failed:'), exc.message)
137 raise error.ResponseError(_('push failed:'), exc.message)
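# Editor's note: a minimal sketch (toy stand-ins, illustration only) of the
# "getchunks -> chunkbuffer -> unbundle20" dance above: the reply bundle is
# flattened into a byte stream and re-parsed, mirroring what a remote peer
# would receive over the wire.
import io

class toybundle(object):
    def getchunks(self):                 # same interface as bundle2.bundle20
        yield b'HG20'
        yield b'payload'

def toychunkbuffer(chunks):              # simplified stand-in for util.chunkbuffer
    return io.BytesIO(b''.join(chunks))

stream = toychunkbuffer(toybundle().getchunks())
assert stream.read(4) == b'HG20'         # an unbundler would parse from here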
137
138
138 def lock(self):
139 def lock(self):
139 return self._repo.lock()
140 return self._repo.lock()
140
141
141 def addchangegroup(self, cg, source, url):
142 def addchangegroup(self, cg, source, url):
142 return changegroup.addchangegroup(self._repo, cg, source, url)
143 return changegroup.addchangegroup(self._repo, cg, source, url)
143
144
144 def pushkey(self, namespace, key, old, new):
145 def pushkey(self, namespace, key, old, new):
145 return self._repo.pushkey(namespace, key, old, new)
146 return self._repo.pushkey(namespace, key, old, new)
146
147
147 def listkeys(self, namespace):
148 def listkeys(self, namespace):
148 return self._repo.listkeys(namespace)
149 return self._repo.listkeys(namespace)
149
150
150 def debugwireargs(self, one, two, three=None, four=None, five=None):
151 def debugwireargs(self, one, two, three=None, four=None, five=None):
151 '''used to test argument passing over the wire'''
152 '''used to test argument passing over the wire'''
152 return "%s %s %s %s %s" % (one, two, three, four, five)
153 return "%s %s %s %s %s" % (one, two, three, four, five)
153
154
154 class locallegacypeer(localpeer):
155 class locallegacypeer(localpeer):
155 '''peer extension which implements legacy methods too; used for tests with
156 '''peer extension which implements legacy methods too; used for tests with
156 restricted capabilities'''
157 restricted capabilities'''
157
158
158 def __init__(self, repo):
159 def __init__(self, repo):
159 localpeer.__init__(self, repo, caps=legacycaps)
160 localpeer.__init__(self, repo, caps=legacycaps)
160
161
161 def branches(self, nodes):
162 def branches(self, nodes):
162 return self._repo.branches(nodes)
163 return self._repo.branches(nodes)
163
164
164 def between(self, pairs):
165 def between(self, pairs):
165 return self._repo.between(pairs)
166 return self._repo.between(pairs)
166
167
167 def changegroup(self, basenodes, source):
168 def changegroup(self, basenodes, source):
168 return changegroup.changegroup(self._repo, basenodes, source)
169 return changegroup.changegroup(self._repo, basenodes, source)
169
170
170 def changegroupsubset(self, bases, heads, source):
171 def changegroupsubset(self, bases, heads, source):
171 return changegroup.changegroupsubset(self._repo, bases, heads, source)
172 return changegroup.changegroupsubset(self._repo, bases, heads, source)
172
173
173 class localrepository(object):
174 class localrepository(object):
174
175
175 supportedformats = set(('revlogv1', 'generaldelta'))
176 supportedformats = set(('revlogv1', 'generaldelta'))
176 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
177 'dotencode'))
178 'dotencode'))
178 openerreqs = set(('revlogv1', 'generaldelta'))
179 openerreqs = set(('revlogv1', 'generaldelta'))
179 requirements = ['revlogv1']
180 requirements = ['revlogv1']
180 filtername = None
181 filtername = None
181
182
182 bundle2caps = {'HG20': ()}
183 bundle2caps = {'HG20': ()}
183
184
184 # a list of (ui, featureset) functions.
185 # a list of (ui, featureset) functions.
185 # only functions defined in module of enabled extensions are invoked
186 # only functions defined in module of enabled extensions are invoked
186 featuresetupfuncs = set()
187 featuresetupfuncs = set()
187
188
188 def _baserequirements(self, create):
189 def _baserequirements(self, create):
189 return self.requirements[:]
190 return self.requirements[:]
190
191
191 def __init__(self, baseui, path=None, create=False):
192 def __init__(self, baseui, path=None, create=False):
192 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
193 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
193 self.wopener = self.wvfs
194 self.wopener = self.wvfs
194 self.root = self.wvfs.base
195 self.root = self.wvfs.base
195 self.path = self.wvfs.join(".hg")
196 self.path = self.wvfs.join(".hg")
196 self.origroot = path
197 self.origroot = path
197 self.auditor = pathutil.pathauditor(self.root, self._checknested)
198 self.auditor = pathutil.pathauditor(self.root, self._checknested)
198 self.vfs = scmutil.vfs(self.path)
199 self.vfs = scmutil.vfs(self.path)
199 self.opener = self.vfs
200 self.opener = self.vfs
200 self.baseui = baseui
201 self.baseui = baseui
201 self.ui = baseui.copy()
202 self.ui = baseui.copy()
202 self.ui.copy = baseui.copy # prevent copying repo configuration
203 self.ui.copy = baseui.copy # prevent copying repo configuration
203 # A list of callbacks to shape the phase if no data were found.
204 # A list of callbacks to shape the phase if no data were found.
204 # Callbacks are of the form: func(repo, roots) --> processed root.
205 # Callbacks are of the form: func(repo, roots) --> processed root.
205 # This list is to be filled by extensions during repo setup
206 # This list is to be filled by extensions during repo setup
206 self._phasedefaults = []
207 self._phasedefaults = []
207 try:
208 try:
208 self.ui.readconfig(self.join("hgrc"), self.root)
209 self.ui.readconfig(self.join("hgrc"), self.root)
209 extensions.loadall(self.ui)
210 extensions.loadall(self.ui)
210 except IOError:
211 except IOError:
211 pass
212 pass
212
213
213 if self.featuresetupfuncs:
214 if self.featuresetupfuncs:
214 self.supported = set(self._basesupported) # use private copy
215 self.supported = set(self._basesupported) # use private copy
215 extmods = set(m.__name__ for n, m
216 extmods = set(m.__name__ for n, m
216 in extensions.extensions(self.ui))
217 in extensions.extensions(self.ui))
217 for setupfunc in self.featuresetupfuncs:
218 for setupfunc in self.featuresetupfuncs:
218 if setupfunc.__module__ in extmods:
219 if setupfunc.__module__ in extmods:
219 setupfunc(self.ui, self.supported)
220 setupfunc(self.ui, self.supported)
220 else:
221 else:
221 self.supported = self._basesupported
222 self.supported = self._basesupported
222
223
223 if not self.vfs.isdir():
224 if not self.vfs.isdir():
224 if create:
225 if create:
225 if not self.wvfs.exists():
226 if not self.wvfs.exists():
226 self.wvfs.makedirs()
227 self.wvfs.makedirs()
227 self.vfs.makedir(notindexed=True)
228 self.vfs.makedir(notindexed=True)
228 requirements = self._baserequirements(create)
229 requirements = self._baserequirements(create)
229 if self.ui.configbool('format', 'usestore', True):
230 if self.ui.configbool('format', 'usestore', True):
230 self.vfs.mkdir("store")
231 self.vfs.mkdir("store")
231 requirements.append("store")
232 requirements.append("store")
232 if self.ui.configbool('format', 'usefncache', True):
233 if self.ui.configbool('format', 'usefncache', True):
233 requirements.append("fncache")
234 requirements.append("fncache")
234 if self.ui.configbool('format', 'dotencode', True):
235 if self.ui.configbool('format', 'dotencode', True):
235 requirements.append('dotencode')
236 requirements.append('dotencode')
236 # create an invalid changelog
237 # create an invalid changelog
237 self.vfs.append(
238 self.vfs.append(
238 "00changelog.i",
239 "00changelog.i",
239 '\0\0\0\2' # represents revlogv2
240 '\0\0\0\2' # represents revlogv2
240 ' dummy changelog to prevent using the old repo layout'
241 ' dummy changelog to prevent using the old repo layout'
241 )
242 )
242 if self.ui.configbool('format', 'generaldelta', False):
243 if self.ui.configbool('format', 'generaldelta', False):
243 requirements.append("generaldelta")
244 requirements.append("generaldelta")
244 requirements = set(requirements)
245 requirements = set(requirements)
245 else:
246 else:
246 raise error.RepoError(_("repository %s not found") % path)
247 raise error.RepoError(_("repository %s not found") % path)
247 elif create:
248 elif create:
248 raise error.RepoError(_("repository %s already exists") % path)
249 raise error.RepoError(_("repository %s already exists") % path)
249 else:
250 else:
250 try:
251 try:
251 requirements = scmutil.readrequires(self.vfs, self.supported)
252 requirements = scmutil.readrequires(self.vfs, self.supported)
252 except IOError, inst:
253 except IOError, inst:
253 if inst.errno != errno.ENOENT:
254 if inst.errno != errno.ENOENT:
254 raise
255 raise
255 requirements = set()
256 requirements = set()
256
257
257 self.sharedpath = self.path
258 self.sharedpath = self.path
258 try:
259 try:
259 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
260 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
260 realpath=True)
261 realpath=True)
261 s = vfs.base
262 s = vfs.base
262 if not vfs.exists():
263 if not vfs.exists():
263 raise error.RepoError(
264 raise error.RepoError(
264 _('.hg/sharedpath points to nonexistent directory %s') % s)
265 _('.hg/sharedpath points to nonexistent directory %s') % s)
265 self.sharedpath = s
266 self.sharedpath = s
266 except IOError, inst:
267 except IOError, inst:
267 if inst.errno != errno.ENOENT:
268 if inst.errno != errno.ENOENT:
268 raise
269 raise
269
270
270 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
271 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
271 self.spath = self.store.path
272 self.spath = self.store.path
272 self.svfs = self.store.vfs
273 self.svfs = self.store.vfs
273 self.sopener = self.svfs
274 self.sopener = self.svfs
274 self.sjoin = self.store.join
275 self.sjoin = self.store.join
275 self.vfs.createmode = self.store.createmode
276 self.vfs.createmode = self.store.createmode
276 self._applyrequirements(requirements)
277 self._applyrequirements(requirements)
277 if create:
278 if create:
278 self._writerequirements()
279 self._writerequirements()
279
280
280
281
281 self._branchcaches = {}
282 self._branchcaches = {}
282 self.filterpats = {}
283 self.filterpats = {}
283 self._datafilters = {}
284 self._datafilters = {}
284 self._transref = self._lockref = self._wlockref = None
285 self._transref = self._lockref = self._wlockref = None
285
286
286 # A cache for various files under .hg/ that tracks file changes,
287 # A cache for various files under .hg/ that tracks file changes,
287 # (used by the filecache decorator)
288 # (used by the filecache decorator)
288 #
289 #
289 # Maps a property name to its util.filecacheentry
290 # Maps a property name to its util.filecacheentry
290 self._filecache = {}
291 self._filecache = {}
291
292
292 # holds sets of revisions to be filtered
293 # holds sets of revisions to be filtered
293 # should be cleared when something might have changed the filter value:
294 # should be cleared when something might have changed the filter value:
294 # - new changesets,
295 # - new changesets,
295 # - phase change,
296 # - phase change,
296 # - new obsolescence marker,
297 # - new obsolescence marker,
297 # - working directory parent change,
298 # - working directory parent change,
298 # - bookmark changes
299 # - bookmark changes
299 self.filteredrevcache = {}
300 self.filteredrevcache = {}
300
301
301 def close(self):
302 def close(self):
302 pass
303 pass
303
304
304 def _restrictcapabilities(self, caps):
305 def _restrictcapabilities(self, caps):
305 # bundle2 is not ready for prime time, drop it unless explicitly
306 # bundle2 is not ready for prime time, drop it unless explicitly
306 # required by the tests (or some brave tester)
307 # required by the tests (or some brave tester)
307 if not self.ui.configbool('server', 'bundle2', False):
308 if self.ui.configbool('server', 'bundle2', False):
308 caps = set(caps)
309 caps = set(caps)
309 caps.discard('bundle2')
310 capsblob = bundle2.encodecaps(self.bundle2caps)
311 caps.add('bundle2=' + urllib.quote(capsblob))
310 return caps
312 return caps
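# Editor's note: hedged illustration of the capability entry built above.
# The exact blob produced by bundle2.encodecaps() is defined in bundle2.py;
# assuming it returns 'HG20' for {'HG20': ()}, the advertised token is:
import urllib                            # Python 2, as used by this module
capsblob = 'HG20'                        # stand-in for encodecaps() output
cap = 'bundle2=' + urllib.quote(capsblob)
# cap == 'bundle2=HG20'; percent-quoting keeps the blob a single,
# space-free token in the server's capabilities line, whatever
# characters the encoded capabilities contain.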
311
313
312 def _applyrequirements(self, requirements):
314 def _applyrequirements(self, requirements):
313 self.requirements = requirements
315 self.requirements = requirements
314 self.sopener.options = dict((r, 1) for r in requirements
316 self.sopener.options = dict((r, 1) for r in requirements
315 if r in self.openerreqs)
317 if r in self.openerreqs)
316 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
318 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
317 if chunkcachesize is not None:
319 if chunkcachesize is not None:
318 self.sopener.options['chunkcachesize'] = chunkcachesize
320 self.sopener.options['chunkcachesize'] = chunkcachesize
319
321
320 def _writerequirements(self):
322 def _writerequirements(self):
321 reqfile = self.opener("requires", "w")
323 reqfile = self.opener("requires", "w")
322 for r in sorted(self.requirements):
324 for r in sorted(self.requirements):
323 reqfile.write("%s\n" % r)
325 reqfile.write("%s\n" % r)
324 reqfile.close()
326 reqfile.close()
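# Editor's note: sketch of the resulting .hg/requires file for a repository
# created with the default knobs above -- one requirement per line, sorted:
#
#     dotencode
#     fncache
#     revlogv1
#     store
#
# (generaldelta is appended only when format.generaldelta is enabled.)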
325
327
326 def _checknested(self, path):
328 def _checknested(self, path):
327 """Determine if path is a legal nested repository."""
329 """Determine if path is a legal nested repository."""
328 if not path.startswith(self.root):
330 if not path.startswith(self.root):
329 return False
331 return False
330 subpath = path[len(self.root) + 1:]
332 subpath = path[len(self.root) + 1:]
331 normsubpath = util.pconvert(subpath)
333 normsubpath = util.pconvert(subpath)
332
334
333 # XXX: Checking against the current working copy is wrong in
335 # XXX: Checking against the current working copy is wrong in
334 # the sense that it can reject things like
336 # the sense that it can reject things like
335 #
337 #
336 # $ hg cat -r 10 sub/x.txt
338 # $ hg cat -r 10 sub/x.txt
337 #
339 #
338 # if sub/ is no longer a subrepository in the working copy
340 # if sub/ is no longer a subrepository in the working copy
339 # parent revision.
341 # parent revision.
340 #
342 #
341 # However, it can of course also allow things that would have
343 # However, it can of course also allow things that would have
342 # been rejected before, such as the above cat command if sub/
344 # been rejected before, such as the above cat command if sub/
343 # is a subrepository now, but was a normal directory before.
345 # is a subrepository now, but was a normal directory before.
344 # The old path auditor would have rejected by mistake since it
346 # The old path auditor would have rejected by mistake since it
345 # panics when it sees sub/.hg/.
347 # panics when it sees sub/.hg/.
346 #
348 #
347 # All in all, checking against the working copy seems sensible
349 # All in all, checking against the working copy seems sensible
348 # since we want to prevent access to nested repositories on
350 # since we want to prevent access to nested repositories on
349 # the filesystem *now*.
351 # the filesystem *now*.
350 ctx = self[None]
352 ctx = self[None]
351 parts = util.splitpath(subpath)
353 parts = util.splitpath(subpath)
352 while parts:
354 while parts:
353 prefix = '/'.join(parts)
355 prefix = '/'.join(parts)
354 if prefix in ctx.substate:
356 if prefix in ctx.substate:
355 if prefix == normsubpath:
357 if prefix == normsubpath:
356 return True
358 return True
357 else:
359 else:
358 sub = ctx.sub(prefix)
360 sub = ctx.sub(prefix)
359 return sub.checknested(subpath[len(prefix) + 1:])
361 return sub.checknested(subpath[len(prefix) + 1:])
360 else:
362 else:
361 parts.pop()
363 parts.pop()
362 return False
364 return False
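# Editor's note: worked example (paths invented) of the prefix loop above.
# For subpath 'sub/deep/x.txt' the candidates tried are 'sub/deep/x.txt',
# 'sub/deep', then 'sub'; if 'sub' appears in ctx.substate, the rest of the
# check is delegated to the subrepo as sub.checknested('deep/x.txt').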
363
365
364 def peer(self):
366 def peer(self):
365 return localpeer(self) # not cached to avoid reference cycle
367 return localpeer(self) # not cached to avoid reference cycle
366
368
367 def unfiltered(self):
369 def unfiltered(self):
368 """Return unfiltered version of the repository
370 """Return unfiltered version of the repository
369
371
370 Intended to be overwritten by filtered repo."""
372 Intended to be overwritten by filtered repo."""
371 return self
373 return self
372
374
373 def filtered(self, name):
375 def filtered(self, name):
374 """Return a filtered version of a repository"""
376 """Return a filtered version of a repository"""
375 # build a new class with the mixin and the current class
377 # build a new class with the mixin and the current class
376 # (possibly subclass of the repo)
378 # (possibly subclass of the repo)
377 class proxycls(repoview.repoview, self.unfiltered().__class__):
379 class proxycls(repoview.repoview, self.unfiltered().__class__):
378 pass
380 pass
379 return proxycls(self, name)
381 return proxycls(self, name)
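# Editor's note: a minimal sketch (toy classes, not Mercurial code) of the
# dynamic mixin trick used by filtered() above: a one-off subclass puts the
# view logic in front of whatever concrete repo class is in use.
class base(object):
    def who(self):
        return 'repo'

class viewmixin(object):
    def who(self):
        return 'view of ' + super(viewmixin, self).who()

class toyproxycls(viewmixin, base):      # shape of repoview + repo class
    pass

assert toyproxycls().who() == 'view of repo'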
380
382
381 @repofilecache('bookmarks')
383 @repofilecache('bookmarks')
382 def _bookmarks(self):
384 def _bookmarks(self):
383 return bookmarks.bmstore(self)
385 return bookmarks.bmstore(self)
384
386
385 @repofilecache('bookmarks.current')
387 @repofilecache('bookmarks.current')
386 def _bookmarkcurrent(self):
388 def _bookmarkcurrent(self):
387 return bookmarks.readcurrent(self)
389 return bookmarks.readcurrent(self)
388
390
389 def bookmarkheads(self, bookmark):
391 def bookmarkheads(self, bookmark):
390 name = bookmark.split('@', 1)[0]
392 name = bookmark.split('@', 1)[0]
391 heads = []
393 heads = []
392 for mark, n in self._bookmarks.iteritems():
394 for mark, n in self._bookmarks.iteritems():
393 if mark.split('@', 1)[0] == name:
395 if mark.split('@', 1)[0] == name:
394 heads.append(n)
396 heads.append(n)
395 return heads
397 return heads
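# Editor's note: hedged example of the '@' naming convention handled above
# (bookmark names invented): divergent bookmarks 'feature', 'feature@default'
# and 'feature@stable' all satisfy mark.split('@', 1)[0] == 'feature', so
# bookmarkheads('feature') collects the nodes of all three.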
396
398
397 @storecache('phaseroots')
399 @storecache('phaseroots')
398 def _phasecache(self):
400 def _phasecache(self):
399 return phases.phasecache(self, self._phasedefaults)
401 return phases.phasecache(self, self._phasedefaults)
400
402
401 @storecache('obsstore')
403 @storecache('obsstore')
402 def obsstore(self):
404 def obsstore(self):
403 store = obsolete.obsstore(self.sopener)
405 store = obsolete.obsstore(self.sopener)
404 if store and not obsolete._enabled:
406 if store and not obsolete._enabled:
405 # message is rare enough to not be translated
407 # message is rare enough to not be translated
406 msg = 'obsolete feature not enabled but %i markers found!\n'
408 msg = 'obsolete feature not enabled but %i markers found!\n'
407 self.ui.warn(msg % len(list(store)))
409 self.ui.warn(msg % len(list(store)))
408 return store
410 return store
409
411
410 @storecache('00changelog.i')
412 @storecache('00changelog.i')
411 def changelog(self):
413 def changelog(self):
412 c = changelog.changelog(self.sopener)
414 c = changelog.changelog(self.sopener)
413 if 'HG_PENDING' in os.environ:
415 if 'HG_PENDING' in os.environ:
414 p = os.environ['HG_PENDING']
416 p = os.environ['HG_PENDING']
415 if p.startswith(self.root):
417 if p.startswith(self.root):
416 c.readpending('00changelog.i.a')
418 c.readpending('00changelog.i.a')
417 return c
419 return c
418
420
419 @storecache('00manifest.i')
421 @storecache('00manifest.i')
420 def manifest(self):
422 def manifest(self):
421 return manifest.manifest(self.sopener)
423 return manifest.manifest(self.sopener)
422
424
423 @repofilecache('dirstate')
425 @repofilecache('dirstate')
424 def dirstate(self):
426 def dirstate(self):
425 warned = [0]
427 warned = [0]
426 def validate(node):
428 def validate(node):
427 try:
429 try:
428 self.changelog.rev(node)
430 self.changelog.rev(node)
429 return node
431 return node
430 except error.LookupError:
432 except error.LookupError:
431 if not warned[0]:
433 if not warned[0]:
432 warned[0] = True
434 warned[0] = True
433 self.ui.warn(_("warning: ignoring unknown"
435 self.ui.warn(_("warning: ignoring unknown"
434 " working parent %s!\n") % short(node))
436 " working parent %s!\n") % short(node))
435 return nullid
437 return nullid
436
438
437 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
439 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
438
440
439 def __getitem__(self, changeid):
441 def __getitem__(self, changeid):
440 if changeid is None:
442 if changeid is None:
441 return context.workingctx(self)
443 return context.workingctx(self)
442 return context.changectx(self, changeid)
444 return context.changectx(self, changeid)
443
445
444 def __contains__(self, changeid):
446 def __contains__(self, changeid):
445 try:
447 try:
446 return bool(self.lookup(changeid))
448 return bool(self.lookup(changeid))
447 except error.RepoLookupError:
449 except error.RepoLookupError:
448 return False
450 return False
449
451
450 def __nonzero__(self):
452 def __nonzero__(self):
451 return True
453 return True
452
454
453 def __len__(self):
455 def __len__(self):
454 return len(self.changelog)
456 return len(self.changelog)
455
457
456 def __iter__(self):
458 def __iter__(self):
457 return iter(self.changelog)
459 return iter(self.changelog)
458
460
459 def revs(self, expr, *args):
461 def revs(self, expr, *args):
460 '''Return a list of revisions matching the given revset'''
462 '''Return a list of revisions matching the given revset'''
461 expr = revset.formatspec(expr, *args)
463 expr = revset.formatspec(expr, *args)
462 m = revset.match(None, expr)
464 m = revset.match(None, expr)
463 return m(self, revset.spanset(self))
465 return m(self, revset.spanset(self))
464
466
465 def set(self, expr, *args):
467 def set(self, expr, *args):
466 '''
468 '''
467 Yield a context for each matching revision, after doing arg
469 Yield a context for each matching revision, after doing arg
468 replacement via revset.formatspec
470 replacement via revset.formatspec
469 '''
471 '''
470 for r in self.revs(expr, *args):
472 for r in self.revs(expr, *args):
471 yield self[r]
473 yield self[r]
472
474
473 def url(self):
475 def url(self):
474 return 'file:' + self.root
476 return 'file:' + self.root
475
477
476 def hook(self, name, throw=False, **args):
478 def hook(self, name, throw=False, **args):
477 return hook.hook(self.ui, self, name, throw, **args)
479 return hook.hook(self.ui, self, name, throw, **args)
478
480
479 @unfilteredmethod
481 @unfilteredmethod
480 def _tag(self, names, node, message, local, user, date, extra={}):
482 def _tag(self, names, node, message, local, user, date, extra={}):
481 if isinstance(names, str):
483 if isinstance(names, str):
482 names = (names,)
484 names = (names,)
483
485
484 branches = self.branchmap()
486 branches = self.branchmap()
485 for name in names:
487 for name in names:
486 self.hook('pretag', throw=True, node=hex(node), tag=name,
488 self.hook('pretag', throw=True, node=hex(node), tag=name,
487 local=local)
489 local=local)
488 if name in branches:
490 if name in branches:
489 self.ui.warn(_("warning: tag %s conflicts with existing"
491 self.ui.warn(_("warning: tag %s conflicts with existing"
490 " branch name\n") % name)
492 " branch name\n") % name)
491
493
492 def writetags(fp, names, munge, prevtags):
494 def writetags(fp, names, munge, prevtags):
493 fp.seek(0, 2)
495 fp.seek(0, 2)
494 if prevtags and prevtags[-1] != '\n':
496 if prevtags and prevtags[-1] != '\n':
495 fp.write('\n')
497 fp.write('\n')
496 for name in names:
498 for name in names:
497 m = munge and munge(name) or name
499 m = munge and munge(name) or name
498 if (self._tagscache.tagtypes and
500 if (self._tagscache.tagtypes and
499 name in self._tagscache.tagtypes):
501 name in self._tagscache.tagtypes):
500 old = self.tags().get(name, nullid)
502 old = self.tags().get(name, nullid)
501 fp.write('%s %s\n' % (hex(old), m))
503 fp.write('%s %s\n' % (hex(old), m))
502 fp.write('%s %s\n' % (hex(node), m))
504 fp.write('%s %s\n' % (hex(node), m))
503 fp.close()
505 fp.close()
504
506
505 prevtags = ''
507 prevtags = ''
506 if local:
508 if local:
507 try:
509 try:
508 fp = self.opener('localtags', 'r+')
510 fp = self.opener('localtags', 'r+')
509 except IOError:
511 except IOError:
510 fp = self.opener('localtags', 'a')
512 fp = self.opener('localtags', 'a')
511 else:
513 else:
512 prevtags = fp.read()
514 prevtags = fp.read()
513
515
514 # local tags are stored in the current charset
516 # local tags are stored in the current charset
515 writetags(fp, names, None, prevtags)
517 writetags(fp, names, None, prevtags)
516 for name in names:
518 for name in names:
517 self.hook('tag', node=hex(node), tag=name, local=local)
519 self.hook('tag', node=hex(node), tag=name, local=local)
518 return
520 return
519
521
520 try:
522 try:
521 fp = self.wfile('.hgtags', 'rb+')
523 fp = self.wfile('.hgtags', 'rb+')
522 except IOError, e:
524 except IOError, e:
523 if e.errno != errno.ENOENT:
525 if e.errno != errno.ENOENT:
524 raise
526 raise
525 fp = self.wfile('.hgtags', 'ab')
527 fp = self.wfile('.hgtags', 'ab')
526 else:
528 else:
527 prevtags = fp.read()
529 prevtags = fp.read()
528
530
529 # committed tags are stored in UTF-8
531 # committed tags are stored in UTF-8
530 writetags(fp, names, encoding.fromlocal, prevtags)
532 writetags(fp, names, encoding.fromlocal, prevtags)
531
533
532 fp.close()
534 fp.close()
533
535
534 self.invalidatecaches()
536 self.invalidatecaches()
535
537
536 if '.hgtags' not in self.dirstate:
538 if '.hgtags' not in self.dirstate:
537 self[None].add(['.hgtags'])
539 self[None].add(['.hgtags'])
538
540
539 m = matchmod.exact(self.root, '', ['.hgtags'])
541 m = matchmod.exact(self.root, '', ['.hgtags'])
540 tagnode = self.commit(message, user, date, extra=extra, match=m)
542 tagnode = self.commit(message, user, date, extra=extra, match=m)
541
543
542 for name in names:
544 for name in names:
543 self.hook('tag', node=hex(node), tag=name, local=local)
545 self.hook('tag', node=hex(node), tag=name, local=local)
544
546
545 return tagnode
547 return tagnode
546
548
547 def tag(self, names, node, message, local, user, date):
549 def tag(self, names, node, message, local, user, date):
548 '''tag a revision with one or more symbolic names.
550 '''tag a revision with one or more symbolic names.
549
551
550 names is a list of strings or, when adding a single tag, names may be a
552 names is a list of strings or, when adding a single tag, names may be a
551 string.
553 string.
552
554
553 if local is True, the tags are stored in a per-repository file.
555 if local is True, the tags are stored in a per-repository file.
554 otherwise, they are stored in the .hgtags file, and a new
556 otherwise, they are stored in the .hgtags file, and a new
555 changeset is committed with the change.
557 changeset is committed with the change.
556
558
557 keyword arguments:
559 keyword arguments:
558
560
559 local: whether to store tags in non-version-controlled file
561 local: whether to store tags in non-version-controlled file
560 (default False)
562 (default False)
561
563
562 message: commit message to use if committing
564 message: commit message to use if committing
563
565
564 user: name of user to use if committing
566 user: name of user to use if committing
565
567
566 date: date tuple to use if committing'''
568 date: date tuple to use if committing'''
567
569
568 if not local:
570 if not local:
569 for x in self.status()[:5]:
571 for x in self.status()[:5]:
570 if '.hgtags' in x:
572 if '.hgtags' in x:
571 raise util.Abort(_('working copy of .hgtags is changed '
573 raise util.Abort(_('working copy of .hgtags is changed '
572 '(please commit .hgtags manually)'))
574 '(please commit .hgtags manually)'))
573
575
574 self.tags() # instantiate the cache
576 self.tags() # instantiate the cache
575 self._tag(names, node, message, local, user, date)
577 self._tag(names, node, message, local, user, date)
576
578
577 @filteredpropertycache
579 @filteredpropertycache
578 def _tagscache(self):
580 def _tagscache(self):
579 '''Returns a tagscache object that contains various tags-related
581 '''Returns a tagscache object that contains various tags-related
580 caches.'''
582 caches.'''
581
583
582 # This simplifies its cache management by having one decorated
584 # This simplifies its cache management by having one decorated
583 # function (this one) and the rest simply fetch things from it.
585 # function (this one) and the rest simply fetch things from it.
584 class tagscache(object):
586 class tagscache(object):
585 def __init__(self):
587 def __init__(self):
586 # These two define the set of tags for this repository. tags
588 # These two define the set of tags for this repository. tags
587 # maps tag name to node; tagtypes maps tag name to 'global' or
589 # maps tag name to node; tagtypes maps tag name to 'global' or
588 # 'local'. (Global tags are defined by .hgtags across all
590 # 'local'. (Global tags are defined by .hgtags across all
589 # heads, and local tags are defined in .hg/localtags.)
591 # heads, and local tags are defined in .hg/localtags.)
590 # They constitute the in-memory cache of tags.
592 # They constitute the in-memory cache of tags.
591 self.tags = self.tagtypes = None
593 self.tags = self.tagtypes = None
592
594
593 self.nodetagscache = self.tagslist = None
595 self.nodetagscache = self.tagslist = None
594
596
595 cache = tagscache()
597 cache = tagscache()
596 cache.tags, cache.tagtypes = self._findtags()
598 cache.tags, cache.tagtypes = self._findtags()
597
599
598 return cache
600 return cache
599
601
600 def tags(self):
602 def tags(self):
601 '''return a mapping of tag to node'''
603 '''return a mapping of tag to node'''
602 t = {}
604 t = {}
603 if self.changelog.filteredrevs:
605 if self.changelog.filteredrevs:
604 tags, tt = self._findtags()
606 tags, tt = self._findtags()
605 else:
607 else:
606 tags = self._tagscache.tags
608 tags = self._tagscache.tags
607 for k, v in tags.iteritems():
609 for k, v in tags.iteritems():
608 try:
610 try:
609 # ignore tags to unknown nodes
611 # ignore tags to unknown nodes
610 self.changelog.rev(v)
612 self.changelog.rev(v)
611 t[k] = v
613 t[k] = v
612 except (error.LookupError, ValueError):
614 except (error.LookupError, ValueError):
613 pass
615 pass
614 return t
616 return t
615
617
616 def _findtags(self):
618 def _findtags(self):
617 '''Do the hard work of finding tags. Return a pair of dicts
619 '''Do the hard work of finding tags. Return a pair of dicts
618 (tags, tagtypes) where tags maps tag name to node, and tagtypes
620 (tags, tagtypes) where tags maps tag name to node, and tagtypes
619 maps tag name to a string like \'global\' or \'local\'.
621 maps tag name to a string like \'global\' or \'local\'.
620 Subclasses or extensions are free to add their own tags, but
622 Subclasses or extensions are free to add their own tags, but
621 should be aware that the returned dicts will be retained for the
623 should be aware that the returned dicts will be retained for the
622 duration of the localrepo object.'''
624 duration of the localrepo object.'''
623
625
624 # XXX what tagtype should subclasses/extensions use? Currently
626 # XXX what tagtype should subclasses/extensions use? Currently
625 # mq and bookmarks add tags, but do not set the tagtype at all.
627 # mq and bookmarks add tags, but do not set the tagtype at all.
626 # Should each extension invent its own tag type? Should there
628 # Should each extension invent its own tag type? Should there
627 # be one tagtype for all such "virtual" tags? Or is the status
629 # be one tagtype for all such "virtual" tags? Or is the status
628 # quo fine?
630 # quo fine?
629
631
630 alltags = {} # map tag name to (node, hist)
632 alltags = {} # map tag name to (node, hist)
631 tagtypes = {}
633 tagtypes = {}
632
634
633 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
635 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
634 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
636 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
635
637
636 # Build the return dicts. Have to re-encode tag names because
638 # Build the return dicts. Have to re-encode tag names because
637 # the tags module always uses UTF-8 (in order not to lose info
639 # the tags module always uses UTF-8 (in order not to lose info
638 # writing to the cache), but the rest of Mercurial wants them in
640 # writing to the cache), but the rest of Mercurial wants them in
639 # local encoding.
641 # local encoding.
640 tags = {}
642 tags = {}
641 for (name, (node, hist)) in alltags.iteritems():
643 for (name, (node, hist)) in alltags.iteritems():
642 if node != nullid:
644 if node != nullid:
643 tags[encoding.tolocal(name)] = node
645 tags[encoding.tolocal(name)] = node
644 tags['tip'] = self.changelog.tip()
646 tags['tip'] = self.changelog.tip()
645 tagtypes = dict([(encoding.tolocal(name), value)
647 tagtypes = dict([(encoding.tolocal(name), value)
646 for (name, value) in tagtypes.iteritems()])
648 for (name, value) in tagtypes.iteritems()])
647 return (tags, tagtypes)
649 return (tags, tagtypes)
648
650
649 def tagtype(self, tagname):
651 def tagtype(self, tagname):
650 '''
652 '''
651 return the type of the given tag. result can be:
653 return the type of the given tag. result can be:
652
654
653 'local' : a local tag
655 'local' : a local tag
654 'global' : a global tag
656 'global' : a global tag
655 None : tag does not exist
657 None : tag does not exist
656 '''
658 '''
657
659
658 return self._tagscache.tagtypes.get(tagname)
660 return self._tagscache.tagtypes.get(tagname)
659
661
660 def tagslist(self):
662 def tagslist(self):
661 '''return a list of tags ordered by revision'''
663 '''return a list of tags ordered by revision'''
662 if not self._tagscache.tagslist:
664 if not self._tagscache.tagslist:
663 l = []
665 l = []
664 for t, n in self.tags().iteritems():
666 for t, n in self.tags().iteritems():
665 r = self.changelog.rev(n)
667 r = self.changelog.rev(n)
666 l.append((r, t, n))
668 l.append((r, t, n))
667 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
669 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
668
670
669 return self._tagscache.tagslist
671 return self._tagscache.tagslist
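# Editor's note: toy data (invented) showing the decorate-sort-undecorate
# idiom above: keying on the revision number yields tags in history order
# rather than alphabetical order.
tags = {'v2': 'node-b', 'v1': 'node-a', 'tip': 'node-c'}
revs = {'node-a': 0, 'node-b': 5, 'node-c': 9}
l = sorted((revs[n], t, n) for t, n in tags.items())
assert [(t, n) for r, t, n in l] == [
    ('v1', 'node-a'), ('v2', 'node-b'), ('tip', 'node-c')]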
670
672
671 def nodetags(self, node):
673 def nodetags(self, node):
672 '''return the tags associated with a node'''
674 '''return the tags associated with a node'''
673 if not self._tagscache.nodetagscache:
675 if not self._tagscache.nodetagscache:
674 nodetagscache = {}
676 nodetagscache = {}
675 for t, n in self._tagscache.tags.iteritems():
677 for t, n in self._tagscache.tags.iteritems():
676 nodetagscache.setdefault(n, []).append(t)
678 nodetagscache.setdefault(n, []).append(t)
677 for tags in nodetagscache.itervalues():
679 for tags in nodetagscache.itervalues():
678 tags.sort()
680 tags.sort()
679 self._tagscache.nodetagscache = nodetagscache
681 self._tagscache.nodetagscache = nodetagscache
680 return self._tagscache.nodetagscache.get(node, [])
682 return self._tagscache.nodetagscache.get(node, [])
681
683
682 def nodebookmarks(self, node):
684 def nodebookmarks(self, node):
683 marks = []
685 marks = []
684 for bookmark, n in self._bookmarks.iteritems():
686 for bookmark, n in self._bookmarks.iteritems():
685 if n == node:
687 if n == node:
686 marks.append(bookmark)
688 marks.append(bookmark)
687 return sorted(marks)
689 return sorted(marks)
688
690
689 def branchmap(self):
691 def branchmap(self):
690 '''returns a dictionary {branch: [branchheads]} with branchheads
692 '''returns a dictionary {branch: [branchheads]} with branchheads
691 ordered by increasing revision number'''
693 ordered by increasing revision number'''
692 branchmap.updatecache(self)
694 branchmap.updatecache(self)
693 return self._branchcaches[self.filtername]
695 return self._branchcaches[self.filtername]
694
696
695 def branchtip(self, branch):
697 def branchtip(self, branch):
696 '''return the tip node for a given branch'''
698 '''return the tip node for a given branch'''
697 try:
699 try:
698 return self.branchmap().branchtip(branch)
700 return self.branchmap().branchtip(branch)
699 except KeyError:
701 except KeyError:
700 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
702 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
701
703
702 def lookup(self, key):
704 def lookup(self, key):
703 return self[key].node()
705 return self[key].node()
704
706
705 def lookupbranch(self, key, remote=None):
707 def lookupbranch(self, key, remote=None):
706 repo = remote or self
708 repo = remote or self
707 if key in repo.branchmap():
709 if key in repo.branchmap():
708 return key
710 return key
709
711
710 repo = (remote and remote.local()) and remote or self
712 repo = (remote and remote.local()) and remote or self
711 return repo[key].branch()
713 return repo[key].branch()
712
714
713 def known(self, nodes):
715 def known(self, nodes):
714 nm = self.changelog.nodemap
716 nm = self.changelog.nodemap
715 pc = self._phasecache
717 pc = self._phasecache
716 result = []
718 result = []
717 for n in nodes:
719 for n in nodes:
718 r = nm.get(n)
720 r = nm.get(n)
719 resp = not (r is None or pc.phase(self, r) >= phases.secret)
721 resp = not (r is None or pc.phase(self, r) >= phases.secret)
720 result.append(resp)
722 result.append(resp)
721 return result
723 return result
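# Editor's note: behavior summary for known() above -- a node is reported
# as known only if it is present in the changelog *and* its phase is below
# secret, so secret changesets stay invisible to discovery.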
722
724
723 def local(self):
725 def local(self):
724 return self
726 return self
725
727
726 def cancopy(self):
728 def cancopy(self):
727 # so statichttprepo's override of local() works
729 # so statichttprepo's override of local() works
728 if not self.local():
730 if not self.local():
729 return False
731 return False
730 if not self.ui.configbool('phases', 'publish', True):
732 if not self.ui.configbool('phases', 'publish', True):
731 return True
733 return True
732 # if publishing we can't copy if there is filtered content
734 # if publishing we can't copy if there is filtered content
733 return not self.filtered('visible').changelog.filteredrevs
735 return not self.filtered('visible').changelog.filteredrevs
734
736
735 def join(self, f):
737 def join(self, f):
736 return os.path.join(self.path, f)
738 return os.path.join(self.path, f)
737
739
738 def wjoin(self, f):
740 def wjoin(self, f):
739 return os.path.join(self.root, f)
741 return os.path.join(self.root, f)
740
742
741 def file(self, f):
743 def file(self, f):
742 if f[0] == '/':
744 if f[0] == '/':
743 f = f[1:]
745 f = f[1:]
744 return filelog.filelog(self.sopener, f)
746 return filelog.filelog(self.sopener, f)
745
747
746 def changectx(self, changeid):
748 def changectx(self, changeid):
747 return self[changeid]
749 return self[changeid]
748
750
749 def parents(self, changeid=None):
751 def parents(self, changeid=None):
750 '''get list of changectxs for parents of changeid'''
752 '''get list of changectxs for parents of changeid'''
751 return self[changeid].parents()
753 return self[changeid].parents()
752
754
753 def setparents(self, p1, p2=nullid):
755 def setparents(self, p1, p2=nullid):
754 copies = self.dirstate.setparents(p1, p2)
756 copies = self.dirstate.setparents(p1, p2)
755 pctx = self[p1]
757 pctx = self[p1]
756 if copies:
758 if copies:
757 # Adjust copy records; the dirstate cannot do it, as it
759 # Adjust copy records; the dirstate cannot do it, as it
758 # requires access to the parents' manifests. Preserve them
760 # requires access to the parents' manifests. Preserve them
759 # only for entries added to the first parent.
761 # only for entries added to the first parent.
760 for f in copies:
762 for f in copies:
761 if f not in pctx and copies[f] in pctx:
763 if f not in pctx and copies[f] in pctx:
762 self.dirstate.copy(copies[f], f)
764 self.dirstate.copy(copies[f], f)
763 if p2 == nullid:
765 if p2 == nullid:
764 for f, s in sorted(self.dirstate.copies().items()):
766 for f, s in sorted(self.dirstate.copies().items()):
765 if f not in pctx and s not in pctx:
767 if f not in pctx and s not in pctx:
766 self.dirstate.copy(None, f)
768 self.dirstate.copy(None, f)
767
769
768 def filectx(self, path, changeid=None, fileid=None):
770 def filectx(self, path, changeid=None, fileid=None):
769 """changeid can be a changeset revision, node, or tag.
771 """changeid can be a changeset revision, node, or tag.
770 fileid can be a file revision or node."""
772 fileid can be a file revision or node."""
771 return context.filectx(self, path, changeid, fileid)
773 return context.filectx(self, path, changeid, fileid)
772
774
773 def getcwd(self):
775 def getcwd(self):
774 return self.dirstate.getcwd()
776 return self.dirstate.getcwd()
775
777
776 def pathto(self, f, cwd=None):
778 def pathto(self, f, cwd=None):
777 return self.dirstate.pathto(f, cwd)
779 return self.dirstate.pathto(f, cwd)
778
780
779 def wfile(self, f, mode='r'):
781 def wfile(self, f, mode='r'):
780 return self.wopener(f, mode)
782 return self.wopener(f, mode)
781
783
782 def _link(self, f):
784 def _link(self, f):
783 return self.wvfs.islink(f)
785 return self.wvfs.islink(f)
784
786
785 def _loadfilter(self, filter):
787 def _loadfilter(self, filter):
786 if filter not in self.filterpats:
788 if filter not in self.filterpats:
787 l = []
789 l = []
788 for pat, cmd in self.ui.configitems(filter):
790 for pat, cmd in self.ui.configitems(filter):
789 if cmd == '!':
791 if cmd == '!':
790 continue
792 continue
791 mf = matchmod.match(self.root, '', [pat])
793 mf = matchmod.match(self.root, '', [pat])
792 fn = None
794 fn = None
793 params = cmd
795 params = cmd
794 for name, filterfn in self._datafilters.iteritems():
796 for name, filterfn in self._datafilters.iteritems():
795 if cmd.startswith(name):
797 if cmd.startswith(name):
796 fn = filterfn
798 fn = filterfn
797 params = cmd[len(name):].lstrip()
799 params = cmd[len(name):].lstrip()
798 break
800 break
799 if not fn:
801 if not fn:
800 fn = lambda s, c, **kwargs: util.filter(s, c)
802 fn = lambda s, c, **kwargs: util.filter(s, c)
801 # Wrap old filters not supporting keyword arguments
803 # Wrap old filters not supporting keyword arguments
802 if not inspect.getargspec(fn)[2]:
804 if not inspect.getargspec(fn)[2]:
803 oldfn = fn
805 oldfn = fn
804 fn = lambda s, c, **kwargs: oldfn(s, c)
806 fn = lambda s, c, **kwargs: oldfn(s, c)
805 l.append((mf, fn, params))
807 l.append((mf, fn, params))
806 self.filterpats[filter] = l
808 self.filterpats[filter] = l
807 return self.filterpats[filter]
809 return self.filterpats[filter]
808
810
809 def _filter(self, filterpats, filename, data):
811 def _filter(self, filterpats, filename, data):
810 for mf, fn, cmd in filterpats:
812 for mf, fn, cmd in filterpats:
811 if mf(filename):
813 if mf(filename):
812 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
814 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
813 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
815 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
814 break
816 break
815
817
816 return data
818 return data
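# Editor's note: hedged configuration example for the filter machinery
# above, using the classic gzip recipe from the hgrc documentation:
#
#     [encode]
#     *.gz = pipe: gunzip
#
#     [decode]
#     *.gz = pipe: gzip
#
# _loadfilter('encode') turns each (pattern, command) pair into a
# (matcher, filterfn, params) triple, and _filter() runs the first
# matching triple over the file data.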
817
819
818 @unfilteredpropertycache
820 @unfilteredpropertycache
819 def _encodefilterpats(self):
821 def _encodefilterpats(self):
820 return self._loadfilter('encode')
822 return self._loadfilter('encode')
821
823
822 @unfilteredpropertycache
824 @unfilteredpropertycache
823 def _decodefilterpats(self):
825 def _decodefilterpats(self):
824 return self._loadfilter('decode')
826 return self._loadfilter('decode')
825
827
826 def adddatafilter(self, name, filter):
828 def adddatafilter(self, name, filter):
827 self._datafilters[name] = filter
829 self._datafilters[name] = filter
828
830
829 def wread(self, filename):
831 def wread(self, filename):
830 if self._link(filename):
832 if self._link(filename):
831 data = self.wvfs.readlink(filename)
833 data = self.wvfs.readlink(filename)
832 else:
834 else:
833 data = self.wopener.read(filename)
835 data = self.wopener.read(filename)
834 return self._filter(self._encodefilterpats, filename, data)
836 return self._filter(self._encodefilterpats, filename, data)
835
837
836 def wwrite(self, filename, data, flags):
838 def wwrite(self, filename, data, flags):
837 data = self._filter(self._decodefilterpats, filename, data)
839 data = self._filter(self._decodefilterpats, filename, data)
838 if 'l' in flags:
840 if 'l' in flags:
839 self.wopener.symlink(data, filename)
841 self.wopener.symlink(data, filename)
840 else:
842 else:
841 self.wopener.write(filename, data)
843 self.wopener.write(filename, data)
842 if 'x' in flags:
844 if 'x' in flags:
843 self.wvfs.setflags(filename, False, True)
845 self.wvfs.setflags(filename, False, True)
844
846
845 def wwritedata(self, filename, data):
847 def wwritedata(self, filename, data):
846 return self._filter(self._decodefilterpats, filename, data)
848 return self._filter(self._decodefilterpats, filename, data)
847
849
848 def transaction(self, desc, report=None):
850 def transaction(self, desc, report=None):
849 tr = self._transref and self._transref() or None
851 tr = self._transref and self._transref() or None
850 if tr and tr.running():
852 if tr and tr.running():
851 return tr.nest()
853 return tr.nest()
852
854
853 # abort here if the journal already exists
855 # abort here if the journal already exists
854 if self.svfs.exists("journal"):
856 if self.svfs.exists("journal"):
855 raise error.RepoError(
857 raise error.RepoError(
856 _("abandoned transaction found - run hg recover"))
858 _("abandoned transaction found - run hg recover"))
857
859
858 def onclose():
860 def onclose():
859 self.store.write(tr)
861 self.store.write(tr)
860
862
861 self._writejournal(desc)
863 self._writejournal(desc)
862 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
864 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
863 rp = report and report or self.ui.warn
865 rp = report and report or self.ui.warn
864 tr = transaction.transaction(rp, self.sopener,
866 tr = transaction.transaction(rp, self.sopener,
865 "journal",
867 "journal",
866 aftertrans(renames),
868 aftertrans(renames),
867 self.store.createmode,
869 self.store.createmode,
868 onclose)
870 onclose)
869 self._transref = weakref.ref(tr)
871 self._transref = weakref.ref(tr)
870 return tr
872 return tr
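# Editor's note: hedged usage sketch of the transaction API above (the
# standard idiom of this era; error handling elided):
#
#     tr = repo.transaction('my-operation')
#     try:
#         # ...append to revlogs, move phase roots, etc...
#         tr.close()     # commit: the journal is renamed away via aftertrans()
#     finally:
#         tr.release()   # if close() never ran, this rolls the journal back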
871
873
872 def _journalfiles(self):
874 def _journalfiles(self):
873 return ((self.svfs, 'journal'),
875 return ((self.svfs, 'journal'),
874 (self.vfs, 'journal.dirstate'),
876 (self.vfs, 'journal.dirstate'),
875 (self.vfs, 'journal.branch'),
877 (self.vfs, 'journal.branch'),
876 (self.vfs, 'journal.desc'),
878 (self.vfs, 'journal.desc'),
877 (self.vfs, 'journal.bookmarks'),
879 (self.vfs, 'journal.bookmarks'),
878 (self.svfs, 'journal.phaseroots'))
880 (self.svfs, 'journal.phaseroots'))
879
881
880 def undofiles(self):
882 def undofiles(self):
881 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
883 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
882
884
883 def _writejournal(self, desc):
885 def _writejournal(self, desc):
884 self.opener.write("journal.dirstate",
886 self.opener.write("journal.dirstate",
885 self.opener.tryread("dirstate"))
887 self.opener.tryread("dirstate"))
886 self.opener.write("journal.branch",
888 self.opener.write("journal.branch",
887 encoding.fromlocal(self.dirstate.branch()))
889 encoding.fromlocal(self.dirstate.branch()))
888 self.opener.write("journal.desc",
890 self.opener.write("journal.desc",
889 "%d\n%s\n" % (len(self), desc))
891 "%d\n%s\n" % (len(self), desc))
890 self.opener.write("journal.bookmarks",
892 self.opener.write("journal.bookmarks",
891 self.opener.tryread("bookmarks"))
893 self.opener.tryread("bookmarks"))
892 self.sopener.write("journal.phaseroots",
894 self.sopener.write("journal.phaseroots",
893 self.sopener.tryread("phaseroots"))
895 self.sopener.tryread("phaseroots"))
894
896
895 def recover(self):
897 def recover(self):
896 lock = self.lock()
898 lock = self.lock()
897 try:
899 try:
898 if self.svfs.exists("journal"):
900 if self.svfs.exists("journal"):
899 self.ui.status(_("rolling back interrupted transaction\n"))
901 self.ui.status(_("rolling back interrupted transaction\n"))
900 transaction.rollback(self.sopener, "journal",
902 transaction.rollback(self.sopener, "journal",
901 self.ui.warn)
903 self.ui.warn)
902 self.invalidate()
904 self.invalidate()
903 return True
905 return True
904 else:
906 else:
905 self.ui.warn(_("no interrupted transaction available\n"))
907 self.ui.warn(_("no interrupted transaction available\n"))
906 return False
908 return False
907 finally:
909 finally:
908 lock.release()
910 lock.release()
909
911
910 def rollback(self, dryrun=False, force=False):
912 def rollback(self, dryrun=False, force=False):
911 wlock = lock = None
913 wlock = lock = None
912 try:
914 try:
913 wlock = self.wlock()
915 wlock = self.wlock()
914 lock = self.lock()
916 lock = self.lock()
915 if self.svfs.exists("undo"):
917 if self.svfs.exists("undo"):
916 return self._rollback(dryrun, force)
918 return self._rollback(dryrun, force)
917 else:
919 else:
918 self.ui.warn(_("no rollback information available\n"))
920 self.ui.warn(_("no rollback information available\n"))
919 return 1
921 return 1
920 finally:
922 finally:
921 release(lock, wlock)
923 release(lock, wlock)
922
924
923 @unfilteredmethod # Until we get smarter cache management
925 @unfilteredmethod # Until we get smarter cache management
924 def _rollback(self, dryrun, force):
926 def _rollback(self, dryrun, force):
925 ui = self.ui
927 ui = self.ui
926 try:
928 try:
927 args = self.opener.read('undo.desc').splitlines()
929 args = self.opener.read('undo.desc').splitlines()
928 (oldlen, desc, detail) = (int(args[0]), args[1], None)
930 (oldlen, desc, detail) = (int(args[0]), args[1], None)
929 if len(args) >= 3:
931 if len(args) >= 3:
930 detail = args[2]
932 detail = args[2]
931 oldtip = oldlen - 1
933 oldtip = oldlen - 1
932
934
933 if detail and ui.verbose:
935 if detail and ui.verbose:
934 msg = (_('repository tip rolled back to revision %s'
936 msg = (_('repository tip rolled back to revision %s'
935 ' (undo %s: %s)\n')
937 ' (undo %s: %s)\n')
936 % (oldtip, desc, detail))
938 % (oldtip, desc, detail))
937 else:
939 else:
938 msg = (_('repository tip rolled back to revision %s'
940 msg = (_('repository tip rolled back to revision %s'
939 ' (undo %s)\n')
941 ' (undo %s)\n')
940 % (oldtip, desc))
942 % (oldtip, desc))
941 except IOError:
943 except IOError:
942 msg = _('rolling back unknown transaction\n')
944 msg = _('rolling back unknown transaction\n')
943 desc = None
945 desc = None
944
946
945 if not force and self['.'] != self['tip'] and desc == 'commit':
947 if not force and self['.'] != self['tip'] and desc == 'commit':
946 raise util.Abort(
948 raise util.Abort(
947 _('rollback of last commit while not checked out '
949 _('rollback of last commit while not checked out '
948 'may lose data'), hint=_('use -f to force'))
950 'may lose data'), hint=_('use -f to force'))
949
951
950 ui.status(msg)
952 ui.status(msg)
951 if dryrun:
953 if dryrun:
952 return 0
954 return 0
953
955
954 parents = self.dirstate.parents()
956 parents = self.dirstate.parents()
955 self.destroying()
957 self.destroying()
956 transaction.rollback(self.sopener, 'undo', ui.warn)
958 transaction.rollback(self.sopener, 'undo', ui.warn)
957 if self.vfs.exists('undo.bookmarks'):
959 if self.vfs.exists('undo.bookmarks'):
958 self.vfs.rename('undo.bookmarks', 'bookmarks')
960 self.vfs.rename('undo.bookmarks', 'bookmarks')
959 if self.svfs.exists('undo.phaseroots'):
961 if self.svfs.exists('undo.phaseroots'):
960 self.svfs.rename('undo.phaseroots', 'phaseroots')
962 self.svfs.rename('undo.phaseroots', 'phaseroots')
961 self.invalidate()
963 self.invalidate()
962
964
963 parentgone = (parents[0] not in self.changelog.nodemap or
965 parentgone = (parents[0] not in self.changelog.nodemap or
964 parents[1] not in self.changelog.nodemap)
966 parents[1] not in self.changelog.nodemap)
965 if parentgone:
967 if parentgone:
966 self.vfs.rename('undo.dirstate', 'dirstate')
968 self.vfs.rename('undo.dirstate', 'dirstate')
967 try:
969 try:
968 branch = self.opener.read('undo.branch')
970 branch = self.opener.read('undo.branch')
969 self.dirstate.setbranch(encoding.tolocal(branch))
971 self.dirstate.setbranch(encoding.tolocal(branch))
970 except IOError:
972 except IOError:
971 ui.warn(_('named branch could not be reset: '
973 ui.warn(_('named branch could not be reset: '
972 'current branch is still \'%s\'\n')
974 'current branch is still \'%s\'\n')
973 % self.dirstate.branch())
975 % self.dirstate.branch())
974
976
975 self.dirstate.invalidate()
977 self.dirstate.invalidate()
976 parents = tuple([p.rev() for p in self.parents()])
978 parents = tuple([p.rev() for p in self.parents()])
977 if len(parents) > 1:
979 if len(parents) > 1:
978 ui.status(_('working directory now based on '
980 ui.status(_('working directory now based on '
979 'revisions %d and %d\n') % parents)
981 'revisions %d and %d\n') % parents)
980 else:
982 else:
981 ui.status(_('working directory now based on '
983 ui.status(_('working directory now based on '
982 'revision %d\n') % parents)
984 'revision %d\n') % parents)
983 # TODO: if we know which new heads may result from this rollback, pass
985 # TODO: if we know which new heads may result from this rollback, pass
984 # them to destroy(), which will prevent the branchhead cache from being
986 # them to destroy(), which will prevent the branchhead cache from being
985 # invalidated.
987 # invalidated.
986 self.destroyed()
988 self.destroyed()
987 return 0
989 return 0
988
990
989 def invalidatecaches(self):
991 def invalidatecaches(self):
990
992
991 if '_tagscache' in vars(self):
993 if '_tagscache' in vars(self):
992 # can't use delattr on proxy
994 # can't use delattr on proxy
993 del self.__dict__['_tagscache']
995 del self.__dict__['_tagscache']
994
996
995 self.unfiltered()._branchcaches.clear()
997 self.unfiltered()._branchcaches.clear()
996 self.invalidatevolatilesets()
998 self.invalidatevolatilesets()
997
999
998 def invalidatevolatilesets(self):
1000 def invalidatevolatilesets(self):
999 self.filteredrevcache.clear()
1001 self.filteredrevcache.clear()
1000 obsolete.clearobscaches(self)
1002 obsolete.clearobscaches(self)
1001
1003
1002 def invalidatedirstate(self):
1004 def invalidatedirstate(self):
1003 '''Invalidates the dirstate, causing the next call to dirstate
1005 '''Invalidates the dirstate, causing the next call to dirstate
1004 to check if it was modified since the last time it was read,
1006 to check if it was modified since the last time it was read,
1005 rereading it if it has.
1007 rereading it if it has.
1006
1008
1007 This is different from dirstate.invalidate() in that it doesn't always
1009 This is different from dirstate.invalidate() in that it doesn't always
1008 reread the dirstate. Use dirstate.invalidate() if you want to
1010 reread the dirstate. Use dirstate.invalidate() if you want to
1009 explicitly read the dirstate again (i.e. restoring it to a previous
1011 explicitly read the dirstate again (i.e. restoring it to a previous
1010 known good state).'''
1012 known good state).'''
1011 if hasunfilteredcache(self, 'dirstate'):
1013 if hasunfilteredcache(self, 'dirstate'):
1012 for k in self.dirstate._filecache:
1014 for k in self.dirstate._filecache:
1013 try:
1015 try:
1014 delattr(self.dirstate, k)
1016 delattr(self.dirstate, k)
1015 except AttributeError:
1017 except AttributeError:
1016 pass
1018 pass
1017 delattr(self.unfiltered(), 'dirstate')
1019 delattr(self.unfiltered(), 'dirstate')
1018
1020
1019 def invalidate(self):
1021 def invalidate(self):
1020 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1022 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1021 for k in self._filecache:
1023 for k in self._filecache:
1022 # dirstate is invalidated separately in invalidatedirstate()
1024 # dirstate is invalidated separately in invalidatedirstate()
1023 if k == 'dirstate':
1025 if k == 'dirstate':
1024 continue
1026 continue
1025
1027
1026 try:
1028 try:
1027 delattr(unfiltered, k)
1029 delattr(unfiltered, k)
1028 except AttributeError:
1030 except AttributeError:
1029 pass
1031 pass
1030 self.invalidatecaches()
1032 self.invalidatecaches()
1031 self.store.invalidatecaches()
1033 self.store.invalidatecaches()
1032
1034
1033 def invalidateall(self):
1035 def invalidateall(self):
1034 '''Fully invalidates both store and non-store parts, causing
1036 '''Fully invalidates both store and non-store parts, causing
1035 subsequent operations to reread any outside changes.'''
1037 subsequent operations to reread any outside changes.'''
1036 # extensions should hook this to invalidate their caches
1038 # extensions should hook this to invalidate their caches
1037 self.invalidate()
1039 self.invalidate()
1038 self.invalidatedirstate()
1040 self.invalidatedirstate()
1039
1041
1040 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1042 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1041 try:
1043 try:
1042 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1044 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1043 except error.LockHeld, inst:
1045 except error.LockHeld, inst:
1044 if not wait:
1046 if not wait:
1045 raise
1047 raise
1046 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1048 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1047 (desc, inst.locker))
1049 (desc, inst.locker))
1048 # default to 600 seconds timeout
1050 # default to 600 seconds timeout
1049 l = lockmod.lock(vfs, lockname,
1051 l = lockmod.lock(vfs, lockname,
1050 int(self.ui.config("ui", "timeout", "600")),
1052 int(self.ui.config("ui", "timeout", "600")),
1051 releasefn, desc=desc)
1053 releasefn, desc=desc)
1052 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1054 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1053 if acquirefn:
1055 if acquirefn:
1054 acquirefn()
1056 acquirefn()
1055 return l
1057 return l
1056
1058
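# Editorial note, not part of this change: the 600-second default above is
# read from the user's configuration; a minimal hgrc sketch:
#
#   [ui]
#   timeout = 600    # seconds to wait for a contended lock before aborting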
1057 def _afterlock(self, callback):
1059 def _afterlock(self, callback):
1058 """add a callback to the current repository lock.
1060 """add a callback to the current repository lock.
1059
1061
1060 The callback will be executed on lock release."""
1062 The callback will be executed on lock release."""
1061 l = self._lockref and self._lockref()
1063 l = self._lockref and self._lockref()
1062 if l:
1064 if l:
1063 l.postrelease.append(callback)
1065 l.postrelease.append(callback)
1064 else:
1066 else:
1065 callback()
1067 callback()
1066
1068
1067 def lock(self, wait=True):
1069 def lock(self, wait=True):
1068 '''Lock the repository store (.hg/store) and return a weak reference
1070 '''Lock the repository store (.hg/store) and return a weak reference
1069 to the lock. Use this before modifying the store (e.g. committing or
1071 to the lock. Use this before modifying the store (e.g. committing or
1070 stripping). If you are opening a transaction, get a lock as well.'''
1072 stripping). If you are opening a transaction, get a lock as well.'''
1071 l = self._lockref and self._lockref()
1073 l = self._lockref and self._lockref()
1072 if l is not None and l.held:
1074 if l is not None and l.held:
1073 l.lock()
1075 l.lock()
1074 return l
1076 return l
1075
1077
1076 def unlock():
1078 def unlock():
1077 if hasunfilteredcache(self, '_phasecache'):
1079 if hasunfilteredcache(self, '_phasecache'):
1078 self._phasecache.write()
1080 self._phasecache.write()
1079 for k, ce in self._filecache.items():
1081 for k, ce in self._filecache.items():
1080 if k == 'dirstate' or k not in self.__dict__:
1082 if k == 'dirstate' or k not in self.__dict__:
1081 continue
1083 continue
1082 ce.refresh()
1084 ce.refresh()
1083
1085
1084 l = self._lock(self.svfs, "lock", wait, unlock,
1086 l = self._lock(self.svfs, "lock", wait, unlock,
1085 self.invalidate, _('repository %s') % self.origroot)
1087 self.invalidate, _('repository %s') % self.origroot)
1086 self._lockref = weakref.ref(l)
1088 self._lockref = weakref.ref(l)
1087 return l
1089 return l
1088
1090
1089 def wlock(self, wait=True):
1091 def wlock(self, wait=True):
1090 '''Lock the non-store parts of the repository (everything under
1092 '''Lock the non-store parts of the repository (everything under
1091 .hg except .hg/store) and return a weak reference to the lock.
1093 .hg except .hg/store) and return a weak reference to the lock.
1092 Use this before modifying files in .hg.'''
1094 Use this before modifying files in .hg.'''
1093 l = self._wlockref and self._wlockref()
1095 l = self._wlockref and self._wlockref()
1094 if l is not None and l.held:
1096 if l is not None and l.held:
1095 l.lock()
1097 l.lock()
1096 return l
1098 return l
1097
1099
1098 def unlock():
1100 def unlock():
1099 self.dirstate.write()
1101 self.dirstate.write()
1100 self._filecache['dirstate'].refresh()
1102 self._filecache['dirstate'].refresh()
1101
1103
1102 l = self._lock(self.vfs, "wlock", wait, unlock,
1104 l = self._lock(self.vfs, "wlock", wait, unlock,
1103 self.invalidatedirstate, _('working directory of %s') %
1105 self.invalidatedirstate, _('working directory of %s') %
1104 self.origroot)
1106 self.origroot)
1105 self._wlockref = weakref.ref(l)
1107 self._wlockref = weakref.ref(l)
1106 return l
1108 return l
1107
1109
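# Editorial sketch, not part of this change: the locking idiom lock() and
# wlock() above imply. When both locks are needed, take wlock first, then
# lock, and release in reverse order. 'repo' is an assumed localrepository
# instance; lockmod is mercurial's lock module.
import lock as lockmod

def lockedoperation(repo):
    wlock = lock = None
    try:
        wlock = repo.wlock()  # guards .hg (dirstate, bookmarks, ...)
        lock = repo.lock()    # guards .hg/store (revlogs, phases, ...)
        # ... modify the repository here ...
    finally:
        lockmod.release(lock, wlock)  # releases in order, skipping Nones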
1108 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1110 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1109 """
1111 """
1110 commit an individual file as part of a larger transaction
1112 commit an individual file as part of a larger transaction
1111 """
1113 """
1112
1114
1113 fname = fctx.path()
1115 fname = fctx.path()
1114 text = fctx.data()
1116 text = fctx.data()
1115 flog = self.file(fname)
1117 flog = self.file(fname)
1116 fparent1 = manifest1.get(fname, nullid)
1118 fparent1 = manifest1.get(fname, nullid)
1117 fparent2 = fparent2o = manifest2.get(fname, nullid)
1119 fparent2 = fparent2o = manifest2.get(fname, nullid)
1118
1120
1119 meta = {}
1121 meta = {}
1120 copy = fctx.renamed()
1122 copy = fctx.renamed()
1121 if copy and copy[0] != fname:
1123 if copy and copy[0] != fname:
1122 # Mark the new revision of this file as a copy of another
1124 # Mark the new revision of this file as a copy of another
1123 # file. This copy data will effectively act as a parent
1125 # file. This copy data will effectively act as a parent
1124 # of this new revision. If this is a merge, the first
1126 # of this new revision. If this is a merge, the first
1125 # parent will be the nullid (meaning "look up the copy data")
1127 # parent will be the nullid (meaning "look up the copy data")
1126 # and the second one will be the other parent. For example:
1128 # and the second one will be the other parent. For example:
1127 #
1129 #
1128 # 0 --- 1 --- 3 rev1 changes file foo
1130 # 0 --- 1 --- 3 rev1 changes file foo
1129 # \ / rev2 renames foo to bar and changes it
1131 # \ / rev2 renames foo to bar and changes it
1130 # \- 2 -/ rev3 should have bar with all changes and
1132 # \- 2 -/ rev3 should have bar with all changes and
1131 # should record that bar descends from
1133 # should record that bar descends from
1132 # bar in rev2 and foo in rev1
1134 # bar in rev2 and foo in rev1
1133 #
1135 #
1134 # this allows this merge to succeed:
1136 # this allows this merge to succeed:
1135 #
1137 #
1136 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1138 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1137 # \ / merging rev3 and rev4 should use bar@rev2
1139 # \ / merging rev3 and rev4 should use bar@rev2
1138 # \- 2 --- 4 as the merge base
1140 # \- 2 --- 4 as the merge base
1139 #
1141 #
1140
1142
1141 cfname = copy[0]
1143 cfname = copy[0]
1142 crev = manifest1.get(cfname)
1144 crev = manifest1.get(cfname)
1143 newfparent = fparent2
1145 newfparent = fparent2
1144
1146
1145 if manifest2: # branch merge
1147 if manifest2: # branch merge
1146 if fparent2 == nullid or crev is None: # copied on remote side
1148 if fparent2 == nullid or crev is None: # copied on remote side
1147 if cfname in manifest2:
1149 if cfname in manifest2:
1148 crev = manifest2[cfname]
1150 crev = manifest2[cfname]
1149 newfparent = fparent1
1151 newfparent = fparent1
1150
1152
1151 # find source in nearest ancestor if we've lost track
1153 # find source in nearest ancestor if we've lost track
1152 if not crev:
1154 if not crev:
1153 self.ui.debug(" %s: searching for copy revision for %s\n" %
1155 self.ui.debug(" %s: searching for copy revision for %s\n" %
1154 (fname, cfname))
1156 (fname, cfname))
1155 for ancestor in self[None].ancestors():
1157 for ancestor in self[None].ancestors():
1156 if cfname in ancestor:
1158 if cfname in ancestor:
1157 crev = ancestor[cfname].filenode()
1159 crev = ancestor[cfname].filenode()
1158 break
1160 break
1159
1161
1160 if crev:
1162 if crev:
1161 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1163 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1162 meta["copy"] = cfname
1164 meta["copy"] = cfname
1163 meta["copyrev"] = hex(crev)
1165 meta["copyrev"] = hex(crev)
1164 fparent1, fparent2 = nullid, newfparent
1166 fparent1, fparent2 = nullid, newfparent
1165 else:
1167 else:
1166 self.ui.warn(_("warning: can't find ancestor for '%s' "
1168 self.ui.warn(_("warning: can't find ancestor for '%s' "
1167 "copied from '%s'!\n") % (fname, cfname))
1169 "copied from '%s'!\n") % (fname, cfname))
1168
1170
1169 elif fparent1 == nullid:
1171 elif fparent1 == nullid:
1170 fparent1, fparent2 = fparent2, nullid
1172 fparent1, fparent2 = fparent2, nullid
1171 elif fparent2 != nullid:
1173 elif fparent2 != nullid:
1172 # is one parent an ancestor of the other?
1174 # is one parent an ancestor of the other?
1173 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1175 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1174 if fparent1 in fparentancestors:
1176 if fparent1 in fparentancestors:
1175 fparent1, fparent2 = fparent2, nullid
1177 fparent1, fparent2 = fparent2, nullid
1176 elif fparent2 in fparentancestors:
1178 elif fparent2 in fparentancestors:
1177 fparent2 = nullid
1179 fparent2 = nullid
1178
1180
1179 # is the file changed?
1181 # is the file changed?
1180 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1182 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1181 changelist.append(fname)
1183 changelist.append(fname)
1182 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1184 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1183
1185
1184 # are just the flags changed during merge?
1186 # are just the flags changed during merge?
1185 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1187 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1186 changelist.append(fname)
1188 changelist.append(fname)
1187
1189
1188 return fparent1
1190 return fparent1
1189
1191
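# Editorial note: the copy information computed above is recorded as filelog
# metadata; a hedged sketch of what a renamed file's entry carries ('foo'
# and the 40-digit hex filenode below are illustrative placeholders):
copymeta = {
    'copy': 'foo',        # path the file was renamed/copied from
    'copyrev': 'a' * 40,  # hex filenode of the source revision
}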
1190 @unfilteredmethod
1192 @unfilteredmethod
1191 def commit(self, text="", user=None, date=None, match=None, force=False,
1193 def commit(self, text="", user=None, date=None, match=None, force=False,
1192 editor=False, extra={}):
1194 editor=False, extra={}):
1193 """Add a new revision to current repository.
1195 """Add a new revision to current repository.
1194
1196
1195 Revision information is gathered from the working directory,
1197 Revision information is gathered from the working directory,
1196 match can be used to filter the committed files. If editor is
1198 match can be used to filter the committed files. If editor is
1197 supplied, it is called to get a commit message.
1199 supplied, it is called to get a commit message.
1198 """
1200 """
1199
1201
1200 def fail(f, msg):
1202 def fail(f, msg):
1201 raise util.Abort('%s: %s' % (f, msg))
1203 raise util.Abort('%s: %s' % (f, msg))
1202
1204
1203 if not match:
1205 if not match:
1204 match = matchmod.always(self.root, '')
1206 match = matchmod.always(self.root, '')
1205
1207
1206 if not force:
1208 if not force:
1207 vdirs = []
1209 vdirs = []
1208 match.explicitdir = vdirs.append
1210 match.explicitdir = vdirs.append
1209 match.bad = fail
1211 match.bad = fail
1210
1212
1211 wlock = self.wlock()
1213 wlock = self.wlock()
1212 try:
1214 try:
1213 wctx = self[None]
1215 wctx = self[None]
1214 merge = len(wctx.parents()) > 1
1216 merge = len(wctx.parents()) > 1
1215
1217
1216 if (not force and merge and match and
1218 if (not force and merge and match and
1217 (match.files() or match.anypats())):
1219 (match.files() or match.anypats())):
1218 raise util.Abort(_('cannot partially commit a merge '
1220 raise util.Abort(_('cannot partially commit a merge '
1219 '(do not specify files or patterns)'))
1221 '(do not specify files or patterns)'))
1220
1222
1221 changes = self.status(match=match, clean=force)
1223 changes = self.status(match=match, clean=force)
1222 if force:
1224 if force:
1223 changes[0].extend(changes[6]) # mq may commit unchanged files
1225 changes[0].extend(changes[6]) # mq may commit unchanged files
1224
1226
1225 # check subrepos
1227 # check subrepos
1226 subs = []
1228 subs = []
1227 commitsubs = set()
1229 commitsubs = set()
1228 newstate = wctx.substate.copy()
1230 newstate = wctx.substate.copy()
1229 # only manage subrepos and .hgsubstate if .hgsub is present
1231 # only manage subrepos and .hgsubstate if .hgsub is present
1230 if '.hgsub' in wctx:
1232 if '.hgsub' in wctx:
1231 # we'll decide whether to track this ourselves, thanks
1233 # we'll decide whether to track this ourselves, thanks
1232 for c in changes[:3]:
1234 for c in changes[:3]:
1233 if '.hgsubstate' in c:
1235 if '.hgsubstate' in c:
1234 c.remove('.hgsubstate')
1236 c.remove('.hgsubstate')
1235
1237
1236 # compare current state to last committed state
1238 # compare current state to last committed state
1237 # build new substate based on last committed state
1239 # build new substate based on last committed state
1238 oldstate = wctx.p1().substate
1240 oldstate = wctx.p1().substate
1239 for s in sorted(newstate.keys()):
1241 for s in sorted(newstate.keys()):
1240 if not match(s):
1242 if not match(s):
1241 # ignore working copy, use old state if present
1243 # ignore working copy, use old state if present
1242 if s in oldstate:
1244 if s in oldstate:
1243 newstate[s] = oldstate[s]
1245 newstate[s] = oldstate[s]
1244 continue
1246 continue
1245 if not force:
1247 if not force:
1246 raise util.Abort(
1248 raise util.Abort(
1247 _("commit with new subrepo %s excluded") % s)
1249 _("commit with new subrepo %s excluded") % s)
1248 if wctx.sub(s).dirty(True):
1250 if wctx.sub(s).dirty(True):
1249 if not self.ui.configbool('ui', 'commitsubrepos'):
1251 if not self.ui.configbool('ui', 'commitsubrepos'):
1250 raise util.Abort(
1252 raise util.Abort(
1251 _("uncommitted changes in subrepo %s") % s,
1253 _("uncommitted changes in subrepo %s") % s,
1252 hint=_("use --subrepos for recursive commit"))
1254 hint=_("use --subrepos for recursive commit"))
1253 subs.append(s)
1255 subs.append(s)
1254 commitsubs.add(s)
1256 commitsubs.add(s)
1255 else:
1257 else:
1256 bs = wctx.sub(s).basestate()
1258 bs = wctx.sub(s).basestate()
1257 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1259 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1258 if oldstate.get(s, (None, None, None))[1] != bs:
1260 if oldstate.get(s, (None, None, None))[1] != bs:
1259 subs.append(s)
1261 subs.append(s)
1260
1262
1261 # check for removed subrepos
1263 # check for removed subrepos
1262 for p in wctx.parents():
1264 for p in wctx.parents():
1263 r = [s for s in p.substate if s not in newstate]
1265 r = [s for s in p.substate if s not in newstate]
1264 subs += [s for s in r if match(s)]
1266 subs += [s for s in r if match(s)]
1265 if subs:
1267 if subs:
1266 if (not match('.hgsub') and
1268 if (not match('.hgsub') and
1267 '.hgsub' in (wctx.modified() + wctx.added())):
1269 '.hgsub' in (wctx.modified() + wctx.added())):
1268 raise util.Abort(
1270 raise util.Abort(
1269 _("can't commit subrepos without .hgsub"))
1271 _("can't commit subrepos without .hgsub"))
1270 changes[0].insert(0, '.hgsubstate')
1272 changes[0].insert(0, '.hgsubstate')
1271
1273
1272 elif '.hgsub' in changes[2]:
1274 elif '.hgsub' in changes[2]:
1273 # clean up .hgsubstate when .hgsub is removed
1275 # clean up .hgsubstate when .hgsub is removed
1274 if ('.hgsubstate' in wctx and
1276 if ('.hgsubstate' in wctx and
1275 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1277 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1276 changes[2].insert(0, '.hgsubstate')
1278 changes[2].insert(0, '.hgsubstate')
1277
1279
1278 # make sure all explicit patterns are matched
1280 # make sure all explicit patterns are matched
1279 if not force and match.files():
1281 if not force and match.files():
1280 matched = set(changes[0] + changes[1] + changes[2])
1282 matched = set(changes[0] + changes[1] + changes[2])
1281
1283
1282 for f in match.files():
1284 for f in match.files():
1283 f = self.dirstate.normalize(f)
1285 f = self.dirstate.normalize(f)
1284 if f == '.' or f in matched or f in wctx.substate:
1286 if f == '.' or f in matched or f in wctx.substate:
1285 continue
1287 continue
1286 if f in changes[3]: # missing
1288 if f in changes[3]: # missing
1287 fail(f, _('file not found!'))
1289 fail(f, _('file not found!'))
1288 if f in vdirs: # visited directory
1290 if f in vdirs: # visited directory
1289 d = f + '/'
1291 d = f + '/'
1290 for mf in matched:
1292 for mf in matched:
1291 if mf.startswith(d):
1293 if mf.startswith(d):
1292 break
1294 break
1293 else:
1295 else:
1294 fail(f, _("no match under directory!"))
1296 fail(f, _("no match under directory!"))
1295 elif f not in self.dirstate:
1297 elif f not in self.dirstate:
1296 fail(f, _("file not tracked!"))
1298 fail(f, _("file not tracked!"))
1297
1299
1298 cctx = context.workingctx(self, text, user, date, extra, changes)
1300 cctx = context.workingctx(self, text, user, date, extra, changes)
1299
1301
1300 if (not force and not extra.get("close") and not merge
1302 if (not force and not extra.get("close") and not merge
1301 and not cctx.files()
1303 and not cctx.files()
1302 and wctx.branch() == wctx.p1().branch()):
1304 and wctx.branch() == wctx.p1().branch()):
1303 return None
1305 return None
1304
1306
1305 if merge and cctx.deleted():
1307 if merge and cctx.deleted():
1306 raise util.Abort(_("cannot commit merge with missing files"))
1308 raise util.Abort(_("cannot commit merge with missing files"))
1307
1309
1308 ms = mergemod.mergestate(self)
1310 ms = mergemod.mergestate(self)
1309 for f in changes[0]:
1311 for f in changes[0]:
1310 if f in ms and ms[f] == 'u':
1312 if f in ms and ms[f] == 'u':
1311 raise util.Abort(_("unresolved merge conflicts "
1313 raise util.Abort(_("unresolved merge conflicts "
1312 "(see hg help resolve)"))
1314 "(see hg help resolve)"))
1313
1315
1314 if editor:
1316 if editor:
1315 cctx._text = editor(self, cctx, subs)
1317 cctx._text = editor(self, cctx, subs)
1316 edited = (text != cctx._text)
1318 edited = (text != cctx._text)
1317
1319
1318 # Save commit message in case this transaction gets rolled back
1320 # Save commit message in case this transaction gets rolled back
1319 # (e.g. by a pretxncommit hook). Leave the content alone on
1321 # (e.g. by a pretxncommit hook). Leave the content alone on
1320 # the assumption that the user will use the same editor again.
1322 # the assumption that the user will use the same editor again.
1321 msgfn = self.savecommitmessage(cctx._text)
1323 msgfn = self.savecommitmessage(cctx._text)
1322
1324
1323 # commit subs and write new state
1325 # commit subs and write new state
1324 if subs:
1326 if subs:
1325 for s in sorted(commitsubs):
1327 for s in sorted(commitsubs):
1326 sub = wctx.sub(s)
1328 sub = wctx.sub(s)
1327 self.ui.status(_('committing subrepository %s\n') %
1329 self.ui.status(_('committing subrepository %s\n') %
1328 subrepo.subrelpath(sub))
1330 subrepo.subrelpath(sub))
1329 sr = sub.commit(cctx._text, user, date)
1331 sr = sub.commit(cctx._text, user, date)
1330 newstate[s] = (newstate[s][0], sr)
1332 newstate[s] = (newstate[s][0], sr)
1331 subrepo.writestate(self, newstate)
1333 subrepo.writestate(self, newstate)
1332
1334
1333 p1, p2 = self.dirstate.parents()
1335 p1, p2 = self.dirstate.parents()
1334 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1336 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1335 try:
1337 try:
1336 self.hook("precommit", throw=True, parent1=hookp1,
1338 self.hook("precommit", throw=True, parent1=hookp1,
1337 parent2=hookp2)
1339 parent2=hookp2)
1338 ret = self.commitctx(cctx, True)
1340 ret = self.commitctx(cctx, True)
1339 except: # re-raises
1341 except: # re-raises
1340 if edited:
1342 if edited:
1341 self.ui.write(
1343 self.ui.write(
1342 _('note: commit message saved in %s\n') % msgfn)
1344 _('note: commit message saved in %s\n') % msgfn)
1343 raise
1345 raise
1344
1346
1345 # update bookmarks, dirstate and mergestate
1347 # update bookmarks, dirstate and mergestate
1346 bookmarks.update(self, [p1, p2], ret)
1348 bookmarks.update(self, [p1, p2], ret)
1347 cctx.markcommitted(ret)
1349 cctx.markcommitted(ret)
1348 ms.reset()
1350 ms.reset()
1349 finally:
1351 finally:
1350 wlock.release()
1352 wlock.release()
1351
1353
1352 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1354 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1353 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1355 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1354 self._afterlock(commithook)
1356 self._afterlock(commithook)
1355 return ret
1357 return ret
1356
1358
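# Hedged usage sketch for commit() as defined above; 'repo' is assumed, and
# scmutil.matchfiles narrows the commit to an explicit list of paths.
import scmutil

def commitfiles(repo, files, message, user=None):
    m = scmutil.matchfiles(repo, files)  # match object for just these paths
    # returns the new changeset node, or None if there was nothing to commit
    return repo.commit(text=message, user=user, match=m)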
1357 @unfilteredmethod
1359 @unfilteredmethod
1358 def commitctx(self, ctx, error=False):
1360 def commitctx(self, ctx, error=False):
1359 """Add a new revision to current repository.
1361 """Add a new revision to current repository.
1360 Revision information is passed via the context argument.
1362 Revision information is passed via the context argument.
1361 """
1363 """
1362
1364
1363 tr = lock = None
1365 tr = lock = None
1364 removed = list(ctx.removed())
1366 removed = list(ctx.removed())
1365 p1, p2 = ctx.p1(), ctx.p2()
1367 p1, p2 = ctx.p1(), ctx.p2()
1366 user = ctx.user()
1368 user = ctx.user()
1367
1369
1368 lock = self.lock()
1370 lock = self.lock()
1369 try:
1371 try:
1370 tr = self.transaction("commit")
1372 tr = self.transaction("commit")
1371 trp = weakref.proxy(tr)
1373 trp = weakref.proxy(tr)
1372
1374
1373 if ctx.files():
1375 if ctx.files():
1374 m1 = p1.manifest().copy()
1376 m1 = p1.manifest().copy()
1375 m2 = p2.manifest()
1377 m2 = p2.manifest()
1376
1378
1377 # check in files
1379 # check in files
1378 new = {}
1380 new = {}
1379 changed = []
1381 changed = []
1380 linkrev = len(self)
1382 linkrev = len(self)
1381 for f in sorted(ctx.modified() + ctx.added()):
1383 for f in sorted(ctx.modified() + ctx.added()):
1382 self.ui.note(f + "\n")
1384 self.ui.note(f + "\n")
1383 try:
1385 try:
1384 fctx = ctx[f]
1386 fctx = ctx[f]
1385 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1387 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1386 changed)
1388 changed)
1387 m1.set(f, fctx.flags())
1389 m1.set(f, fctx.flags())
1388 except OSError, inst:
1390 except OSError, inst:
1389 self.ui.warn(_("trouble committing %s!\n") % f)
1391 self.ui.warn(_("trouble committing %s!\n") % f)
1390 raise
1392 raise
1391 except IOError, inst:
1393 except IOError, inst:
1392 errcode = getattr(inst, 'errno', errno.ENOENT)
1394 errcode = getattr(inst, 'errno', errno.ENOENT)
1393 if error or errcode and errcode != errno.ENOENT:
1395 if error or errcode and errcode != errno.ENOENT:
1394 self.ui.warn(_("trouble committing %s!\n") % f)
1396 self.ui.warn(_("trouble committing %s!\n") % f)
1395 raise
1397 raise
1396 else:
1398 else:
1397 removed.append(f)
1399 removed.append(f)
1398
1400
1399 # update manifest
1401 # update manifest
1400 m1.update(new)
1402 m1.update(new)
1401 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1403 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1402 drop = [f for f in removed if f in m1]
1404 drop = [f for f in removed if f in m1]
1403 for f in drop:
1405 for f in drop:
1404 del m1[f]
1406 del m1[f]
1405 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1407 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1406 p2.manifestnode(), (new, drop))
1408 p2.manifestnode(), (new, drop))
1407 files = changed + removed
1409 files = changed + removed
1408 else:
1410 else:
1409 mn = p1.manifestnode()
1411 mn = p1.manifestnode()
1410 files = []
1412 files = []
1411
1413
1412 # update changelog
1414 # update changelog
1413 self.changelog.delayupdate()
1415 self.changelog.delayupdate()
1414 n = self.changelog.add(mn, files, ctx.description(),
1416 n = self.changelog.add(mn, files, ctx.description(),
1415 trp, p1.node(), p2.node(),
1417 trp, p1.node(), p2.node(),
1416 user, ctx.date(), ctx.extra().copy())
1418 user, ctx.date(), ctx.extra().copy())
1417 p = lambda: self.changelog.writepending() and self.root or ""
1419 p = lambda: self.changelog.writepending() and self.root or ""
1418 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1420 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1419 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1421 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1420 parent2=xp2, pending=p)
1422 parent2=xp2, pending=p)
1421 self.changelog.finalize(trp)
1423 self.changelog.finalize(trp)
1422 # set the new commit in its proper phase
1424 # set the new commit in its proper phase
1423 targetphase = subrepo.newcommitphase(self.ui, ctx)
1425 targetphase = subrepo.newcommitphase(self.ui, ctx)
1424 if targetphase:
1426 if targetphase:
1425 # retracting the boundary does not alter parent changesets.
1427 # retracting the boundary does not alter parent changesets.
1426 # if a parent has a higher phase, the resulting phase will
1428 # if a parent has a higher phase, the resulting phase will
1427 # be compliant anyway
1429 # be compliant anyway
1428 #
1430 #
1429 # if minimal phase was 0 we don't need to retract anything
1431 # if minimal phase was 0 we don't need to retract anything
1430 phases.retractboundary(self, targetphase, [n])
1432 phases.retractboundary(self, targetphase, [n])
1431 tr.close()
1433 tr.close()
1432 branchmap.updatecache(self.filtered('served'))
1434 branchmap.updatecache(self.filtered('served'))
1433 return n
1435 return n
1434 finally:
1436 finally:
1435 if tr:
1437 if tr:
1436 tr.release()
1438 tr.release()
1437 lock.release()
1439 lock.release()
1438
1440
1439 @unfilteredmethod
1441 @unfilteredmethod
1440 def destroying(self):
1442 def destroying(self):
1441 '''Inform the repository that nodes are about to be destroyed.
1443 '''Inform the repository that nodes are about to be destroyed.
1442 Intended for use by strip and rollback, so there's a common
1444 Intended for use by strip and rollback, so there's a common
1443 place for anything that has to be done before destroying history.
1445 place for anything that has to be done before destroying history.
1444
1446
1445 This is mostly useful for saving state that is in memory and waiting
1447 This is mostly useful for saving state that is in memory and waiting
1446 to be flushed when the current lock is released. Because a call to
1448 to be flushed when the current lock is released. Because a call to
1447 destroyed is imminent, the repo will be invalidated, causing those
1449 destroyed is imminent, the repo will be invalidated, causing those
1448 changes to stay in memory (waiting for the next unlock) or vanish
1450 changes to stay in memory (waiting for the next unlock) or vanish
1449 completely.
1451 completely.
1450 '''
1452 '''
1451 # When using the same lock to commit and strip, the phasecache is left
1453 # When using the same lock to commit and strip, the phasecache is left
1452 # dirty after committing. Then when we strip, the repo is invalidated,
1454 # dirty after committing. Then when we strip, the repo is invalidated,
1453 # causing those changes to disappear.
1455 # causing those changes to disappear.
1454 if '_phasecache' in vars(self):
1456 if '_phasecache' in vars(self):
1455 self._phasecache.write()
1457 self._phasecache.write()
1456
1458
1457 @unfilteredmethod
1459 @unfilteredmethod
1458 def destroyed(self):
1460 def destroyed(self):
1459 '''Inform the repository that nodes have been destroyed.
1461 '''Inform the repository that nodes have been destroyed.
1460 Intended for use by strip and rollback, so there's a common
1462 Intended for use by strip and rollback, so there's a common
1461 place for anything that has to be done after destroying history.
1463 place for anything that has to be done after destroying history.
1462 '''
1464 '''
1463 # When one tries to:
1465 # When one tries to:
1464 # 1) destroy nodes thus calling this method (e.g. strip)
1466 # 1) destroy nodes thus calling this method (e.g. strip)
1465 # 2) use phasecache somewhere (e.g. commit)
1467 # 2) use phasecache somewhere (e.g. commit)
1466 #
1468 #
1467 # then 2) will fail because the phasecache contains nodes that were
1469 # then 2) will fail because the phasecache contains nodes that were
1468 # removed. We can either remove phasecache from the filecache,
1470 # removed. We can either remove phasecache from the filecache,
1469 # causing it to reload next time it is accessed, or simply filter
1471 # causing it to reload next time it is accessed, or simply filter
1470 # the removed nodes now and write the updated cache.
1472 # the removed nodes now and write the updated cache.
1471 self._phasecache.filterunknown(self)
1473 self._phasecache.filterunknown(self)
1472 self._phasecache.write()
1474 self._phasecache.write()
1473
1475
1474 # update the 'served' branch cache to help read-only server processes
1476 # update the 'served' branch cache to help read-only server processes
1475 # Thanks to branchcache collaboration, this is done from the nearest
1477 # Thanks to branchcache collaboration, this is done from the nearest
1476 # filtered subset and it is expected to be fast.
1478 # filtered subset and it is expected to be fast.
1477 branchmap.updatecache(self.filtered('served'))
1479 branchmap.updatecache(self.filtered('served'))
1478
1480
1479 # Ensure the persistent tag cache is updated. Doing it now
1481 # Ensure the persistent tag cache is updated. Doing it now
1480 # means that the tag cache only has to worry about destroyed
1482 # means that the tag cache only has to worry about destroyed
1481 # heads immediately after a strip/rollback. That in turn
1483 # heads immediately after a strip/rollback. That in turn
1482 # guarantees that "cachetip == currenttip" (comparing both rev
1484 # guarantees that "cachetip == currenttip" (comparing both rev
1483 # and node) always means no nodes have been added or destroyed.
1485 # and node) always means no nodes have been added or destroyed.
1484
1486
1485 # XXX this is suboptimal when qrefresh'ing: we strip the current
1487 # XXX this is suboptimal when qrefresh'ing: we strip the current
1486 # head, refresh the tag cache, then immediately add a new head.
1488 # head, refresh the tag cache, then immediately add a new head.
1487 # But I think doing it this way is necessary for the "instant
1489 # But I think doing it this way is necessary for the "instant
1488 # tag cache retrieval" case to work.
1490 # tag cache retrieval" case to work.
1489 self.invalidate()
1491 self.invalidate()
1490
1492
1491 def walk(self, match, node=None):
1493 def walk(self, match, node=None):
1492 '''
1494 '''
1493 walk recursively through the directory tree or a given
1495 walk recursively through the directory tree or a given
1494 changeset, finding all files matched by the match
1496 changeset, finding all files matched by the match
1495 function
1497 function
1496 '''
1498 '''
1497 return self[node].walk(match)
1499 return self[node].walk(match)
1498
1500
1499 def status(self, node1='.', node2=None, match=None,
1501 def status(self, node1='.', node2=None, match=None,
1500 ignored=False, clean=False, unknown=False,
1502 ignored=False, clean=False, unknown=False,
1501 listsubrepos=False):
1503 listsubrepos=False):
1502 """return status of files between two nodes or node and working
1504 """return status of files between two nodes or node and working
1503 directory.
1505 directory.
1504
1506
1505 If node1 is None, use the first dirstate parent instead.
1507 If node1 is None, use the first dirstate parent instead.
1506 If node2 is None, compare node1 with working directory.
1508 If node2 is None, compare node1 with working directory.
1507 """
1509 """
1508
1510
1509 def mfmatches(ctx):
1511 def mfmatches(ctx):
1510 mf = ctx.manifest().copy()
1512 mf = ctx.manifest().copy()
1511 if match.always():
1513 if match.always():
1512 return mf
1514 return mf
1513 for fn in mf.keys():
1515 for fn in mf.keys():
1514 if not match(fn):
1516 if not match(fn):
1515 del mf[fn]
1517 del mf[fn]
1516 return mf
1518 return mf
1517
1519
1518 ctx1 = self[node1]
1520 ctx1 = self[node1]
1519 ctx2 = self[node2]
1521 ctx2 = self[node2]
1520
1522
1521 working = ctx2.rev() is None
1523 working = ctx2.rev() is None
1522 parentworking = working and ctx1 == self['.']
1524 parentworking = working and ctx1 == self['.']
1523 match = match or matchmod.always(self.root, self.getcwd())
1525 match = match or matchmod.always(self.root, self.getcwd())
1524 listignored, listclean, listunknown = ignored, clean, unknown
1526 listignored, listclean, listunknown = ignored, clean, unknown
1525
1527
1526 # load earliest manifest first for caching reasons
1528 # load earliest manifest first for caching reasons
1527 if not working and ctx2.rev() < ctx1.rev():
1529 if not working and ctx2.rev() < ctx1.rev():
1528 ctx2.manifest()
1530 ctx2.manifest()
1529
1531
1530 if not parentworking:
1532 if not parentworking:
1531 def bad(f, msg):
1533 def bad(f, msg):
1532 # 'f' may be a directory pattern from 'match.files()',
1534 # 'f' may be a directory pattern from 'match.files()',
1533 # so 'f not in ctx1' is not enough
1535 # so 'f not in ctx1' is not enough
1534 if f not in ctx1 and f not in ctx1.dirs():
1536 if f not in ctx1 and f not in ctx1.dirs():
1535 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1537 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1536 match.bad = bad
1538 match.bad = bad
1537
1539
1538 if working: # we need to scan the working dir
1540 if working: # we need to scan the working dir
1539 subrepos = []
1541 subrepos = []
1540 if '.hgsub' in self.dirstate:
1542 if '.hgsub' in self.dirstate:
1541 subrepos = sorted(ctx2.substate)
1543 subrepos = sorted(ctx2.substate)
1542 s = self.dirstate.status(match, subrepos, listignored,
1544 s = self.dirstate.status(match, subrepos, listignored,
1543 listclean, listunknown)
1545 listclean, listunknown)
1544 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1546 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1545
1547
1546 # check for any possibly clean files
1548 # check for any possibly clean files
1547 if parentworking and cmp:
1549 if parentworking and cmp:
1548 fixup = []
1550 fixup = []
1549 # do a full compare of any files that might have changed
1551 # do a full compare of any files that might have changed
1550 for f in sorted(cmp):
1552 for f in sorted(cmp):
1551 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1553 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1552 or ctx1[f].cmp(ctx2[f])):
1554 or ctx1[f].cmp(ctx2[f])):
1553 modified.append(f)
1555 modified.append(f)
1554 else:
1556 else:
1555 fixup.append(f)
1557 fixup.append(f)
1556
1558
1557 # update dirstate for files that are actually clean
1559 # update dirstate for files that are actually clean
1558 if fixup:
1560 if fixup:
1559 if listclean:
1561 if listclean:
1560 clean += fixup
1562 clean += fixup
1561
1563
1562 try:
1564 try:
1563 # updating the dirstate is optional
1565 # updating the dirstate is optional
1564 # so we don't wait on the lock
1566 # so we don't wait on the lock
1565 wlock = self.wlock(False)
1567 wlock = self.wlock(False)
1566 try:
1568 try:
1567 for f in fixup:
1569 for f in fixup:
1568 self.dirstate.normal(f)
1570 self.dirstate.normal(f)
1569 finally:
1571 finally:
1570 wlock.release()
1572 wlock.release()
1571 except error.LockError:
1573 except error.LockError:
1572 pass
1574 pass
1573
1575
1574 if not parentworking:
1576 if not parentworking:
1575 mf1 = mfmatches(ctx1)
1577 mf1 = mfmatches(ctx1)
1576 if working:
1578 if working:
1577 # we are comparing working dir against non-parent
1579 # we are comparing working dir against non-parent
1578 # generate a pseudo-manifest for the working dir
1580 # generate a pseudo-manifest for the working dir
1579 mf2 = mfmatches(self['.'])
1581 mf2 = mfmatches(self['.'])
1580 for f in cmp + modified + added:
1582 for f in cmp + modified + added:
1581 mf2[f] = None
1583 mf2[f] = None
1582 mf2.set(f, ctx2.flags(f))
1584 mf2.set(f, ctx2.flags(f))
1583 for f in removed:
1585 for f in removed:
1584 if f in mf2:
1586 if f in mf2:
1585 del mf2[f]
1587 del mf2[f]
1586 else:
1588 else:
1587 # we are comparing two revisions
1589 # we are comparing two revisions
1588 deleted, unknown, ignored = [], [], []
1590 deleted, unknown, ignored = [], [], []
1589 mf2 = mfmatches(ctx2)
1591 mf2 = mfmatches(ctx2)
1590
1592
1591 modified, added, clean = [], [], []
1593 modified, added, clean = [], [], []
1592 withflags = mf1.withflags() | mf2.withflags()
1594 withflags = mf1.withflags() | mf2.withflags()
1593 for fn, mf2node in mf2.iteritems():
1595 for fn, mf2node in mf2.iteritems():
1594 if fn in mf1:
1596 if fn in mf1:
1595 if (fn not in deleted and
1597 if (fn not in deleted and
1596 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1598 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1597 (mf1[fn] != mf2node and
1599 (mf1[fn] != mf2node and
1598 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1600 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1599 modified.append(fn)
1601 modified.append(fn)
1600 elif listclean:
1602 elif listclean:
1601 clean.append(fn)
1603 clean.append(fn)
1602 del mf1[fn]
1604 del mf1[fn]
1603 elif fn not in deleted:
1605 elif fn not in deleted:
1604 added.append(fn)
1606 added.append(fn)
1605 removed = mf1.keys()
1607 removed = mf1.keys()
1606
1608
1607 if working and modified and not self.dirstate._checklink:
1609 if working and modified and not self.dirstate._checklink:
1608 # Symlink placeholders may get non-symlink-like contents
1610 # Symlink placeholders may get non-symlink-like contents
1609 # via user error or dereferencing by NFS or Samba servers,
1611 # via user error or dereferencing by NFS or Samba servers,
1610 # so we filter out any placeholders that don't look like a
1612 # so we filter out any placeholders that don't look like a
1611 # symlink
1613 # symlink
1612 sane = []
1614 sane = []
1613 for f in modified:
1615 for f in modified:
1614 if ctx2.flags(f) == 'l':
1616 if ctx2.flags(f) == 'l':
1615 d = ctx2[f].data()
1617 d = ctx2[f].data()
1616 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1618 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1617 self.ui.debug('ignoring suspect symlink placeholder'
1619 self.ui.debug('ignoring suspect symlink placeholder'
1618 ' "%s"\n' % f)
1620 ' "%s"\n' % f)
1619 continue
1621 continue
1620 sane.append(f)
1622 sane.append(f)
1621 modified = sane
1623 modified = sane
1622
1624
1623 r = modified, added, removed, deleted, unknown, ignored, clean
1625 r = modified, added, removed, deleted, unknown, ignored, clean
1624
1626
1625 if listsubrepos:
1627 if listsubrepos:
1626 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1628 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1627 if working:
1629 if working:
1628 rev2 = None
1630 rev2 = None
1629 else:
1631 else:
1630 rev2 = ctx2.substate[subpath][1]
1632 rev2 = ctx2.substate[subpath][1]
1631 try:
1633 try:
1632 submatch = matchmod.narrowmatcher(subpath, match)
1634 submatch = matchmod.narrowmatcher(subpath, match)
1633 s = sub.status(rev2, match=submatch, ignored=listignored,
1635 s = sub.status(rev2, match=submatch, ignored=listignored,
1634 clean=listclean, unknown=listunknown,
1636 clean=listclean, unknown=listunknown,
1635 listsubrepos=True)
1637 listsubrepos=True)
1636 for rfiles, sfiles in zip(r, s):
1638 for rfiles, sfiles in zip(r, s):
1637 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1639 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1638 except error.LookupError:
1640 except error.LookupError:
1639 self.ui.status(_("skipping missing subrepository: %s\n")
1641 self.ui.status(_("skipping missing subrepository: %s\n")
1640 % subpath)
1642 % subpath)
1641
1643
1642 for l in r:
1644 for l in r:
1643 l.sort()
1645 l.sort()
1644 return r
1646 return r
1645
1647
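# Sketch of consuming the 7-tuple status() returns above; the order matches
# the 'r = modified, added, removed, ...' assignment. 'repo' is assumed.
st = repo.status(node1='.', node2=None,  # first parent vs. working dir
                 ignored=True, clean=True, unknown=True)
modified, added, removed, deleted, unknown, ignored, clean = st
for f in modified:
    repo.ui.write('M %s\n' % f)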
1646 def heads(self, start=None):
1648 def heads(self, start=None):
1647 heads = self.changelog.heads(start)
1649 heads = self.changelog.heads(start)
1648 # sort the output in rev descending order
1650 # sort the output in rev descending order
1649 return sorted(heads, key=self.changelog.rev, reverse=True)
1651 return sorted(heads, key=self.changelog.rev, reverse=True)
1650
1652
1651 def branchheads(self, branch=None, start=None, closed=False):
1653 def branchheads(self, branch=None, start=None, closed=False):
1652 '''return a (possibly filtered) list of heads for the given branch
1654 '''return a (possibly filtered) list of heads for the given branch
1653
1655
1654 Heads are returned in topological order, from newest to oldest.
1656 Heads are returned in topological order, from newest to oldest.
1655 If branch is None, use the dirstate branch.
1657 If branch is None, use the dirstate branch.
1656 If start is not None, return only heads reachable from start.
1658 If start is not None, return only heads reachable from start.
1657 If closed is True, return heads that are marked as closed as well.
1659 If closed is True, return heads that are marked as closed as well.
1658 '''
1660 '''
1659 if branch is None:
1661 if branch is None:
1660 branch = self[None].branch()
1662 branch = self[None].branch()
1661 branches = self.branchmap()
1663 branches = self.branchmap()
1662 if branch not in branches:
1664 if branch not in branches:
1663 return []
1665 return []
1664 # the cache returns heads ordered lowest to highest
1666 # the cache returns heads ordered lowest to highest
1665 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1667 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1666 if start is not None:
1668 if start is not None:
1667 # filter out the heads that cannot be reached from startrev
1669 # filter out the heads that cannot be reached from startrev
1668 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1670 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1669 bheads = [h for h in bheads if h in fbheads]
1671 bheads = [h for h in bheads if h in fbheads]
1670 return bheads
1672 return bheads
1671
1673
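# Hedged usage sketch for branchheads() above: heads come back newest-first,
# so detecting a multi-headed branch is a simple length check. 'repo' is
# assumed.
tips = repo.branchheads('default', closed=False)
if len(tips) > 1:
    repo.ui.warn('branch has %d heads; a merge may be needed\n' % len(tips))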
1672 def branches(self, nodes):
1674 def branches(self, nodes):
1673 if not nodes:
1675 if not nodes:
1674 nodes = [self.changelog.tip()]
1676 nodes = [self.changelog.tip()]
1675 b = []
1677 b = []
1676 for n in nodes:
1678 for n in nodes:
1677 t = n
1679 t = n
1678 while True:
1680 while True:
1679 p = self.changelog.parents(n)
1681 p = self.changelog.parents(n)
1680 if p[1] != nullid or p[0] == nullid:
1682 if p[1] != nullid or p[0] == nullid:
1681 b.append((t, n, p[0], p[1]))
1683 b.append((t, n, p[0], p[1]))
1682 break
1684 break
1683 n = p[0]
1685 n = p[0]
1684 return b
1686 return b
1685
1687
1686 def between(self, pairs):
1688 def between(self, pairs):
1687 r = []
1689 r = []
1688
1690
1689 for top, bottom in pairs:
1691 for top, bottom in pairs:
1690 n, l, i = top, [], 0
1692 n, l, i = top, [], 0
1691 f = 1
1693 f = 1
1692
1694
1693 while n != bottom and n != nullid:
1695 while n != bottom and n != nullid:
1694 p = self.changelog.parents(n)[0]
1696 p = self.changelog.parents(n)[0]
1695 if i == f:
1697 if i == f:
1696 l.append(n)
1698 l.append(n)
1697 f = f * 2
1699 f = f * 2
1698 n = p
1700 n = p
1699 i += 1
1701 i += 1
1700
1702
1701 r.append(l)
1703 r.append(l)
1702
1704
1703 return r
1705 return r
1704
1706
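# Editorial sketch of the sampling between() performs above: walking first
# parents from 'top' toward 'bottom', it records the nodes at distances
# 1, 2, 4, 8, ... (the i == f test, with f doubling), giving a logarithmic
# skeleton of the chain for the old discovery protocol.
def sampledistances(length):
    distances, i, f = [], 0, 1
    while i < length:           # stands in for the walk to 'bottom'
        if i == f:              # same test as in between()
            distances.append(i)
            f *= 2
        i += 1
    return distances            # sampledistances(20) -> [1, 2, 4, 8, 16]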
1705 def pull(self, remote, heads=None, force=False):
1707 def pull(self, remote, heads=None, force=False):
1706 return exchange.pull(self, remote, heads, force)
1708 return exchange.pull(self, remote, heads, force)
1707
1709
1708 def checkpush(self, pushop):
1710 def checkpush(self, pushop):
1709 """Extensions can override this function if additional checks have
1711 """Extensions can override this function if additional checks have
1710 to be performed before pushing, or call it if they override push
1712 to be performed before pushing, or call it if they override push
1711 command.
1713 command.
1712 """
1714 """
1713 pass
1715 pass
1714
1716
1715 @unfilteredpropertycache
1717 @unfilteredpropertycache
1716 def prepushoutgoinghooks(self):
1718 def prepushoutgoinghooks(self):
1717 """Return util.hooks consists of "(repo, remote, outgoing)"
1719 """Return util.hooks consists of "(repo, remote, outgoing)"
1718 functions, which are called before pushing changesets.
1720 functions, which are called before pushing changesets.
1719 """
1721 """
1720 return util.hooks()
1722 return util.hooks()
1721
1723
1722 def push(self, remote, force=False, revs=None, newbranch=False):
1724 def push(self, remote, force=False, revs=None, newbranch=False):
1723 return exchange.push(self, remote, force, revs, newbranch)
1725 return exchange.push(self, remote, force, revs, newbranch)
1724
1726
1725 def stream_in(self, remote, requirements):
1727 def stream_in(self, remote, requirements):
1726 lock = self.lock()
1728 lock = self.lock()
1727 try:
1729 try:
1728 # Save remote branchmap. We will use it later
1730 # Save remote branchmap. We will use it later
1729 # to speed up branchcache creation
1731 # to speed up branchcache creation
1730 rbranchmap = None
1732 rbranchmap = None
1731 if remote.capable("branchmap"):
1733 if remote.capable("branchmap"):
1732 rbranchmap = remote.branchmap()
1734 rbranchmap = remote.branchmap()
1733
1735
1734 fp = remote.stream_out()
1736 fp = remote.stream_out()
1735 l = fp.readline()
1737 l = fp.readline()
1736 try:
1738 try:
1737 resp = int(l)
1739 resp = int(l)
1738 except ValueError:
1740 except ValueError:
1739 raise error.ResponseError(
1741 raise error.ResponseError(
1740 _('unexpected response from remote server:'), l)
1742 _('unexpected response from remote server:'), l)
1741 if resp == 1:
1743 if resp == 1:
1742 raise util.Abort(_('operation forbidden by server'))
1744 raise util.Abort(_('operation forbidden by server'))
1743 elif resp == 2:
1745 elif resp == 2:
1744 raise util.Abort(_('locking the remote repository failed'))
1746 raise util.Abort(_('locking the remote repository failed'))
1745 elif resp != 0:
1747 elif resp != 0:
1746 raise util.Abort(_('the server sent an unknown error code'))
1748 raise util.Abort(_('the server sent an unknown error code'))
1747 self.ui.status(_('streaming all changes\n'))
1749 self.ui.status(_('streaming all changes\n'))
1748 l = fp.readline()
1750 l = fp.readline()
1749 try:
1751 try:
1750 total_files, total_bytes = map(int, l.split(' ', 1))
1752 total_files, total_bytes = map(int, l.split(' ', 1))
1751 except (ValueError, TypeError):
1753 except (ValueError, TypeError):
1752 raise error.ResponseError(
1754 raise error.ResponseError(
1753 _('unexpected response from remote server:'), l)
1755 _('unexpected response from remote server:'), l)
1754 self.ui.status(_('%d files to transfer, %s of data\n') %
1756 self.ui.status(_('%d files to transfer, %s of data\n') %
1755 (total_files, util.bytecount(total_bytes)))
1757 (total_files, util.bytecount(total_bytes)))
1756 handled_bytes = 0
1758 handled_bytes = 0
1757 self.ui.progress(_('clone'), 0, total=total_bytes)
1759 self.ui.progress(_('clone'), 0, total=total_bytes)
1758 start = time.time()
1760 start = time.time()
1759
1761
1760 tr = self.transaction(_('clone'))
1762 tr = self.transaction(_('clone'))
1761 try:
1763 try:
1762 for i in xrange(total_files):
1764 for i in xrange(total_files):
1763 # XXX doesn't support '\n' or '\r' in filenames
1765 # XXX doesn't support '\n' or '\r' in filenames
1764 l = fp.readline()
1766 l = fp.readline()
1765 try:
1767 try:
1766 name, size = l.split('\0', 1)
1768 name, size = l.split('\0', 1)
1767 size = int(size)
1769 size = int(size)
1768 except (ValueError, TypeError):
1770 except (ValueError, TypeError):
1769 raise error.ResponseError(
1771 raise error.ResponseError(
1770 _('unexpected response from remote server:'), l)
1772 _('unexpected response from remote server:'), l)
1771 if self.ui.debugflag:
1773 if self.ui.debugflag:
1772 self.ui.debug('adding %s (%s)\n' %
1774 self.ui.debug('adding %s (%s)\n' %
1773 (name, util.bytecount(size)))
1775 (name, util.bytecount(size)))
1774 # for backwards compat, name was partially encoded
1776 # for backwards compat, name was partially encoded
1775 ofp = self.sopener(store.decodedir(name), 'w')
1777 ofp = self.sopener(store.decodedir(name), 'w')
1776 for chunk in util.filechunkiter(fp, limit=size):
1778 for chunk in util.filechunkiter(fp, limit=size):
1777 handled_bytes += len(chunk)
1779 handled_bytes += len(chunk)
1778 self.ui.progress(_('clone'), handled_bytes,
1780 self.ui.progress(_('clone'), handled_bytes,
1779 total=total_bytes)
1781 total=total_bytes)
1780 ofp.write(chunk)
1782 ofp.write(chunk)
1781 ofp.close()
1783 ofp.close()
1782 tr.close()
1784 tr.close()
1783 finally:
1785 finally:
1784 tr.release()
1786 tr.release()
1785
1787
1786 # Writing straight to files circumvented the in-memory caches
1788 # Writing straight to files circumvented the in-memory caches
1787 self.invalidate()
1789 self.invalidate()
1788
1790
1789 elapsed = time.time() - start
1791 elapsed = time.time() - start
1790 if elapsed <= 0:
1792 if elapsed <= 0:
1791 elapsed = 0.001
1793 elapsed = 0.001
1792 self.ui.progress(_('clone'), None)
1794 self.ui.progress(_('clone'), None)
1793 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1795 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1794 (util.bytecount(total_bytes), elapsed,
1796 (util.bytecount(total_bytes), elapsed,
1795 util.bytecount(total_bytes / elapsed)))
1797 util.bytecount(total_bytes / elapsed)))
1796
1798
1797 # new requirements = old non-format requirements +
1799 # new requirements = old non-format requirements +
1798 # new format-related requirements
1800 # new format-related requirements
1799 # from the streamed-in repository
1801 # from the streamed-in repository
1800 requirements.update(set(self.requirements) - self.supportedformats)
1802 requirements.update(set(self.requirements) - self.supportedformats)
1801 self._applyrequirements(requirements)
1803 self._applyrequirements(requirements)
1802 self._writerequirements()
1804 self._writerequirements()
1803
1805
1804 if rbranchmap:
1806 if rbranchmap:
1805 rbheads = []
1807 rbheads = []
1806 for bheads in rbranchmap.itervalues():
1808 for bheads in rbranchmap.itervalues():
1807 rbheads.extend(bheads)
1809 rbheads.extend(bheads)
1808
1810
1809 if rbheads:
1811 if rbheads:
1810 rtiprev = max((int(self.changelog.rev(node))
1812 rtiprev = max((int(self.changelog.rev(node))
1811 for node in rbheads))
1813 for node in rbheads))
1812 cache = branchmap.branchcache(rbranchmap,
1814 cache = branchmap.branchcache(rbranchmap,
1813 self[rtiprev].node(),
1815 self[rtiprev].node(),
1814 rtiprev)
1816 rtiprev)
1815 # Try to stick it as low as possible
1817 # Try to stick it as low as possible
1816 # filters above 'served' are unlikely to be fetched from a clone
1818 # filters above 'served' are unlikely to be fetched from a clone
1817 for candidate in ('base', 'immutable', 'served'):
1819 for candidate in ('base', 'immutable', 'served'):
1818 rview = self.filtered(candidate)
1820 rview = self.filtered(candidate)
1819 if cache.validfor(rview):
1821 if cache.validfor(rview):
1820 self._branchcaches[candidate] = cache
1822 self._branchcaches[candidate] = cache
1821 cache.write(rview)
1823 cache.write(rview)
1822 break
1824 break
1823 self.invalidate()
1825 self.invalidate()
1824 return len(self.heads()) + 1
1826 return len(self.heads()) + 1
1825 finally:
1827 finally:
1826 lock.release()
1828 lock.release()
1827
1829
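# Editorial sketch of the stream_out wire format consumed above, assuming
# 'fp' is the file-like object returned by remote.stream_out():
#
#   <status>\n                     0 = ok, 1 = forbidden, 2 = lock failed
#   <total_files> <total_bytes>\n
#   then, per file:
#   <encoded name>\0<size>\n       followed by exactly <size> raw bytes
def readstreamheader(fp):
    status = int(fp.readline())
    if status != 0:
        raise ValueError('server refused stream clone (code %d)' % status)
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    return total_files, total_bytes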
1828 def clone(self, remote, heads=[], stream=False):
1830 def clone(self, remote, heads=[], stream=False):
1829 '''clone remote repository.
1831 '''clone remote repository.
1830
1832
1831 keyword arguments:
1833 keyword arguments:
1832 heads: list of revs to clone (forces use of pull)
1834 heads: list of revs to clone (forces use of pull)
1833 stream: use streaming clone if possible'''
1835 stream: use streaming clone if possible'''
1834
1836
1835 # now, all clients that can request uncompressed clones can
1837 # now, all clients that can request uncompressed clones can
1836 # read repo formats supported by all servers that can serve
1838 # read repo formats supported by all servers that can serve
1837 # them.
1839 # them.
1838
1840
1839 # if revlog format changes, client will have to check version
1841 # if revlog format changes, client will have to check version
1840 # and format flags on "stream" capability, and use
1842 # and format flags on "stream" capability, and use
1841 # uncompressed only if compatible.
1843 # uncompressed only if compatible.
1842
1844
1843 if not stream:
1845 if not stream:
1844 # if the server explicitly prefers to stream (for fast LANs)
1846 # if the server explicitly prefers to stream (for fast LANs)
1845 stream = remote.capable('stream-preferred')
1847 stream = remote.capable('stream-preferred')
1846
1848
1847 if stream and not heads:
1849 if stream and not heads:
1848 # 'stream' means remote revlog format is revlogv1 only
1850 # 'stream' means remote revlog format is revlogv1 only
1849 if remote.capable('stream'):
1851 if remote.capable('stream'):
1850 return self.stream_in(remote, set(('revlogv1',)))
1852 return self.stream_in(remote, set(('revlogv1',)))
1851 # otherwise, 'streamreqs' contains the remote revlog format
1853 # otherwise, 'streamreqs' contains the remote revlog format
1852 streamreqs = remote.capable('streamreqs')
1854 streamreqs = remote.capable('streamreqs')
1853 if streamreqs:
1855 if streamreqs:
1854 streamreqs = set(streamreqs.split(','))
1856 streamreqs = set(streamreqs.split(','))
1855 # if we support it, stream in and adjust our requirements
1857 # if we support it, stream in and adjust our requirements
1856 if not streamreqs - self.supportedformats:
1858 if not streamreqs - self.supportedformats:
1857 return self.stream_in(remote, streamreqs)
1859 return self.stream_in(remote, streamreqs)
1858 return self.pull(remote, heads)
1860 return self.pull(remote, heads)
1859
1861
1860 def pushkey(self, namespace, key, old, new):
1862 def pushkey(self, namespace, key, old, new):
1861 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1863 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1862 old=old, new=new)
1864 old=old, new=new)
1863 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1865 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1864 ret = pushkey.push(self, namespace, key, old, new)
1866 ret = pushkey.push(self, namespace, key, old, new)
1865 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1867 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1866 ret=ret)
1868 ret=ret)
1867 return ret
1869 return ret
1868
1870
1869 def listkeys(self, namespace):
1871 def listkeys(self, namespace):
1870 self.hook('prelistkeys', throw=True, namespace=namespace)
1872 self.hook('prelistkeys', throw=True, namespace=namespace)
1871 self.ui.debug('listing keys for "%s"\n' % namespace)
1873 self.ui.debug('listing keys for "%s"\n' % namespace)
1872 values = pushkey.list(self, namespace)
1874 values = pushkey.list(self, namespace)
1873 self.hook('listkeys', namespace=namespace, values=values)
1875 self.hook('listkeys', namespace=namespace, values=values)
1874 return values
1876 return values
1875
1877
1876 def debugwireargs(self, one, two, three=None, four=None, five=None):
1878 def debugwireargs(self, one, two, three=None, four=None, five=None):
1877 '''used to test argument passing over the wire'''
1879 '''used to test argument passing over the wire'''
1878 return "%s %s %s %s %s" % (one, two, three, four, five)
1880 return "%s %s %s %s %s" % (one, two, three, four, five)
1879
1881
1880 def savecommitmessage(self, text):
1882 def savecommitmessage(self, text):
1881 fp = self.opener('last-message.txt', 'wb')
1883 fp = self.opener('last-message.txt', 'wb')
1882 try:
1884 try:
1883 fp.write(text)
1885 fp.write(text)
1884 finally:
1886 finally:
1885 fp.close()
1887 fp.close()
1886 return self.pathto(fp.name[len(self.root) + 1:])
1888 return self.pathto(fp.name[len(self.root) + 1:])
1887
1889
1888 # used to avoid circular references so destructors work
1890 # used to avoid circular references so destructors work
1889 def aftertrans(files):
1891 def aftertrans(files):
1890 renamefiles = [tuple(t) for t in files]
1892 renamefiles = [tuple(t) for t in files]
1891 def a():
1893 def a():
1892 for vfs, src, dest in renamefiles:
1894 for vfs, src, dest in renamefiles:
1893 try:
1895 try:
1894 vfs.rename(src, dest)
1896 vfs.rename(src, dest)
1895 except OSError: # journal file does not yet exist
1897 except OSError: # journal file does not yet exist
1896 pass
1898 pass
1897 return a
1899 return a
1898
1900
1899 def undoname(fn):
1901 def undoname(fn):
1900 base, name = os.path.split(fn)
1902 base, name = os.path.split(fn)
1901 assert name.startswith('journal')
1903 assert name.startswith('journal')
1902 return os.path.join(base, name.replace('journal', 'undo', 1))
1904 return os.path.join(base, name.replace('journal', 'undo', 1))
1903
1905
1904 def instance(ui, path, create):
1906 def instance(ui, path, create):
1905 return localrepository(ui, util.urllocalpath(path), create)
1907 return localrepository(ui, util.urllocalpath(path), create)
1906
1908
1907 def islocal(path):
1909 def islocal(path):
1908 return True
1910 return True
@@ -1,811 +1,812 b''
1 # wireproto.py - generic wire protocol support functions
1 # wireproto.py - generic wire protocol support functions
2 #
2 #
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import urllib, tempfile, os, sys
8 import urllib, tempfile, os, sys
9 from i18n import _
9 from i18n import _
10 from node import bin, hex
10 from node import bin, hex
11 import changegroup as changegroupmod, bundle2
11 import changegroup as changegroupmod, bundle2
12 import peer, error, encoding, util, store, exchange
12 import peer, error, encoding, util, store, exchange
13
13
14
14
15 class abstractserverproto(object):
15 class abstractserverproto(object):
16 """abstract class that summarizes the protocol API
16 """abstract class that summarizes the protocol API
17
17
18 Used as reference and documentation.
18 Used as reference and documentation.
19 """
19 """
20
20
21 def getargs(self, args):
21 def getargs(self, args):
22 """return the value for arguments in <args>
22 """return the value for arguments in <args>
23
23
24 returns a list of values (same order as <args>)"""
24 returns a list of values (same order as <args>)"""
25 raise NotImplementedError()
25 raise NotImplementedError()
26
26
27 def getfile(self, fp):
27 def getfile(self, fp):
28 """write the whole content of a file into a file like object
28 """write the whole content of a file into a file like object
29
29
30 The file is in the form::
30 The file is in the form::
31
31
32 (<chunk-size>\n<chunk>)+0\n
32 (<chunk-size>\n<chunk>)+0\n
33
33
34 chunk size is the integer encoded in ASCII decimal.
34 chunk size is the integer encoded in ASCII decimal.
35 """
35 """
36 raise NotImplementedError()
36 raise NotImplementedError()
37
37
38 def redirect(self):
38 def redirect(self):
39 """may setup interception for stdout and stderr
39 """may setup interception for stdout and stderr
40
40
41 See also the `restore` method."""
41 See also the `restore` method."""
42 raise NotImplementedError()
42 raise NotImplementedError()
43
43
44 # If the `redirect` function does install interception, the `restore`
44 # If the `redirect` function does install interception, the `restore`
45 # function MUST be defined. If interception is not used, this function
45 # function MUST be defined. If interception is not used, this function
46 # MUST NOT be defined.
46 # MUST NOT be defined.
47 #
47 #
48 # left commented here on purpose
48 # left commented here on purpose
49 #
49 #
50 #def restore(self):
50 #def restore(self):
51 # """reinstall previous stdout and stderr and return intercepted stdout
51 # """reinstall previous stdout and stderr and return intercepted stdout
52 # """
52 # """
53 # raise NotImplementedError()
53 # raise NotImplementedError()
54
54
55 def groupchunks(self, cg):
55 def groupchunks(self, cg):
56 """return 4096 chunks from a changegroup object
56 """return 4096 chunks from a changegroup object
57
57
58 Some protocols may have compressed the contents."""
58 Some protocols may have compressed the contents."""
59 raise NotImplementedError()
59 raise NotImplementedError()
60
60
61 # abstract batching support
61 # abstract batching support
62
62
63 class future(object):
63 class future(object):
64 '''placeholder for a value to be set later'''
64 '''placeholder for a value to be set later'''
65 def set(self, value):
65 def set(self, value):
66 if util.safehasattr(self, 'value'):
66 if util.safehasattr(self, 'value'):
67 raise error.RepoError("future is already set")
67 raise error.RepoError("future is already set")
68 self.value = value
68 self.value = value
69
69
70 class batcher(object):
70 class batcher(object):
71 '''base class for batches of commands submittable in a single request
71 '''base class for batches of commands submittable in a single request
72
72
73 All methods invoked on instances of this class are simply queued and
73 All methods invoked on instances of this class are simply queued and
74 return a future for the result. Once you call submit(), all the queued
74 return a future for the result. Once you call submit(), all the queued
75 calls are performed and the results set in their respective futures.
75 calls are performed and the results set in their respective futures.
76 '''
76 '''
77 def __init__(self):
77 def __init__(self):
78 self.calls = []
78 self.calls = []
79 def __getattr__(self, name):
79 def __getattr__(self, name):
80 def call(*args, **opts):
80 def call(*args, **opts):
81 resref = future()
81 resref = future()
82 self.calls.append((name, args, opts, resref,))
82 self.calls.append((name, args, opts, resref,))
83 return resref
83 return resref
84 return call
84 return call
85 def submit(self):
85 def submit(self):
86 pass
86 pass
87
87
88 class localbatch(batcher):
88 class localbatch(batcher):
89 '''performs the queued calls directly'''
89 '''performs the queued calls directly'''
90 def __init__(self, local):
90 def __init__(self, local):
91 batcher.__init__(self)
91 batcher.__init__(self)
92 self.local = local
92 self.local = local
93 def submit(self):
93 def submit(self):
94 for name, args, opts, resref in self.calls:
94 for name, args, opts, resref in self.calls:
95 resref.set(getattr(self.local, name)(*args, **opts))
95 resref.set(getattr(self.local, name)(*args, **opts))
96
96
97 class remotebatch(batcher):
97 class remotebatch(batcher):
98 '''batches the queued calls; uses as few roundtrips as possible'''
98 '''batches the queued calls; uses as few roundtrips as possible'''
99 def __init__(self, remote):
99 def __init__(self, remote):
100 '''remote must support _submitbatch(encbatch) and
100 '''remote must support _submitbatch(encbatch) and
101 _submitone(op, encargs)'''
101 _submitone(op, encargs)'''
102 batcher.__init__(self)
102 batcher.__init__(self)
103 self.remote = remote
103 self.remote = remote
104 def submit(self):
104 def submit(self):
105 req, rsp = [], []
105 req, rsp = [], []
106 for name, args, opts, resref in self.calls:
106 for name, args, opts, resref in self.calls:
107 mtd = getattr(self.remote, name)
107 mtd = getattr(self.remote, name)
108 batchablefn = getattr(mtd, 'batchable', None)
108 batchablefn = getattr(mtd, 'batchable', None)
109 if batchablefn is not None:
109 if batchablefn is not None:
110 batchable = batchablefn(mtd.im_self, *args, **opts)
110 batchable = batchablefn(mtd.im_self, *args, **opts)
111 encargsorres, encresref = batchable.next()
111 encargsorres, encresref = batchable.next()
112 if encresref:
112 if encresref:
113 req.append((name, encargsorres,))
113 req.append((name, encargsorres,))
114 rsp.append((batchable, encresref, resref,))
114 rsp.append((batchable, encresref, resref,))
115 else:
115 else:
116 resref.set(encargsorres)
116 resref.set(encargsorres)
117 else:
117 else:
118 if req:
118 if req:
119 self._submitreq(req, rsp)
119 self._submitreq(req, rsp)
120 req, rsp = [], []
120 req, rsp = [], []
121 resref.set(mtd(*args, **opts))
121 resref.set(mtd(*args, **opts))
122 if req:
122 if req:
123 self._submitreq(req, rsp)
123 self._submitreq(req, rsp)
124 def _submitreq(self, req, rsp):
124 def _submitreq(self, req, rsp):
125 encresults = self.remote._submitbatch(req)
125 encresults = self.remote._submitbatch(req)
126 for encres, r in zip(encresults, rsp):
126 for encres, r in zip(encresults, rsp):
127 batchable, encresref, resref = r
127 batchable, encresref, resref = r
128 encresref.set(encres)
128 encresref.set(encres)
129 resref.set(batchable.next())
129 resref.set(batchable.next())
130
130
131 def batchable(f):
131 def batchable(f):
132 '''annotation for batchable methods
132 '''annotation for batchable methods
133
133
134 Such methods must implement a coroutine as follows:
134 Such methods must implement a coroutine as follows:
135
135
136 @batchable
136 @batchable
137 def sample(self, one, two=None):
137 def sample(self, one, two=None):
138 # Handle locally computable results first:
138 # Handle locally computable results first:
139 if not one:
139 if not one:
140 yield "a local result", None
140 yield "a local result", None
141 # Build list of encoded arguments suitable for your wire protocol:
141 # Build list of encoded arguments suitable for your wire protocol:
142 encargs = [('one', encode(one),), ('two', encode(two),)]
142 encargs = [('one', encode(one),), ('two', encode(two),)]
143 # Create future for injection of encoded result:
143 # Create future for injection of encoded result:
144 encresref = future()
144 encresref = future()
145 # Return encoded arguments and future:
145 # Return encoded arguments and future:
146 yield encargs, encresref
146 yield encargs, encresref
147 # Assuming the future to be filled with the result from the batched
147 # Assuming the future to be filled with the result from the batched
148 # request now. Decode it:
148 # request now. Decode it:
149 yield decode(encresref.value)
149 yield decode(encresref.value)
150
150
151 The decorator returns a function which wraps this coroutine as a plain
151 The decorator returns a function which wraps this coroutine as a plain
152 method, but adds the original method as an attribute called "batchable",
152 method, but adds the original method as an attribute called "batchable",
153 which is used by remotebatch to split the call into separate encoding and
153 which is used by remotebatch to split the call into separate encoding and
154 decoding phases.
154 decoding phases.
155 '''
155 '''
156 def plain(*args, **opts):
156 def plain(*args, **opts):
157 batchable = f(*args, **opts)
157 batchable = f(*args, **opts)
158 encargsorres, encresref = batchable.next()
158 encargsorres, encresref = batchable.next()
159 if not encresref:
159 if not encresref:
160 return encargsorres # a local result in this case
160 return encargsorres # a local result in this case
161 self = args[0]
161 self = args[0]
162 encresref.set(self._submitone(f.func_name, encargsorres))
162 encresref.set(self._submitone(f.func_name, encargsorres))
163 return batchable.next()
163 return batchable.next()
164 setattr(plain, 'batchable', f)
164 setattr(plain, 'batchable', f)
165 return plain
165 return plain
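# Usage sketch (hypothetical peer object): a @batchable method works
# both standalone and inside a batch; only the batched form shares a
# round trip.
#
#     remote.lookup('tip')          # one immediate round trip
#
#     b = remote.batch()
#     f1 = b.lookup('tip')          # queued, returns a future
#     f2 = b.heads()                # queued, returns a future
#     b.submit()                    # a single round trip for both
#     node, heads = f1.value, f2.value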
166
166
167 # list of nodes encoding / decoding
167 # list of nodes encoding / decoding
168
168
169 def decodelist(l, sep=' '):
169 def decodelist(l, sep=' '):
170 if l:
170 if l:
171 return map(bin, l.split(sep))
171 return map(bin, l.split(sep))
172 return []
172 return []
173
173
174 def encodelist(l, sep=' '):
174 def encodelist(l, sep=' '):
175 return sep.join(map(hex, l))
175 return sep.join(map(hex, l))
176
176
177 # batched call argument encoding
177 # batched call argument encoding
178
178
179 def escapearg(plain):
179 def escapearg(plain):
180 return (plain
180 return (plain
181 .replace(':', '::')
181 .replace(':', '::')
182 .replace(',', ':,')
182 .replace(',', ':,')
183 .replace(';', ':;')
183 .replace(';', ':;')
184 .replace('=', ':='))
184 .replace('=', ':='))
185
185
186 def unescapearg(escaped):
186 def unescapearg(escaped):
187 return (escaped
187 return (escaped
188 .replace(':=', '=')
188 .replace(':=', '=')
189 .replace(':;', ';')
189 .replace(':;', ';')
190 .replace(':,', ',')
190 .replace(':,', ',')
191 .replace('::', ':'))
191 .replace('::', ':'))
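# Round-trip illustration (argument values hypothetical): ':' is
# doubled first when escaping, so unescaping can safely restore ':,',
# ':;' and ':=' before collapsing '::' back to ':'.
#
#     escapearg('key=val,other')    # -> 'key:=val:,other'
#     unescapearg('a::b:;c')        # -> 'a:b;c'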
192
192
193 # client side
193 # client side
194
194
195 class wirepeer(peer.peerrepository):
195 class wirepeer(peer.peerrepository):
196
196
197 def batch(self):
197 def batch(self):
198 return remotebatch(self)
198 return remotebatch(self)
199 def _submitbatch(self, req):
199 def _submitbatch(self, req):
200 cmds = []
200 cmds = []
201 for op, argsdict in req:
201 for op, argsdict in req:
202 args = ','.join('%s=%s' % p for p in argsdict.iteritems())
202 args = ','.join('%s=%s' % p for p in argsdict.iteritems())
203 cmds.append('%s %s' % (op, args))
203 cmds.append('%s %s' % (op, args))
204 rsp = self._call("batch", cmds=';'.join(cmds))
204 rsp = self._call("batch", cmds=';'.join(cmds))
205 return rsp.split(';')
205 return rsp.split(';')
206 def _submitone(self, op, args):
206 def _submitone(self, op, args):
207 return self._call(op, **args)
207 return self._call(op, **args)
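# Wire payload sketch (command names and values hypothetical): two
# queued calls, 'heads' with no arguments and 'known' with a node list,
# are folded into one 'batch' request roughly like
#
#     cmds='heads ;known nodes=<hex1> <hex2>'
#
# and the reply is the per-command results joined with ';'.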
208
208
209 @batchable
209 @batchable
210 def lookup(self, key):
210 def lookup(self, key):
211 self.requirecap('lookup', _('look up remote revision'))
211 self.requirecap('lookup', _('look up remote revision'))
212 f = future()
212 f = future()
213 yield {'key': encoding.fromlocal(key)}, f
213 yield {'key': encoding.fromlocal(key)}, f
214 d = f.value
214 d = f.value
215 success, data = d[:-1].split(" ", 1)
215 success, data = d[:-1].split(" ", 1)
216 if int(success):
216 if int(success):
217 yield bin(data)
217 yield bin(data)
218 self._abort(error.RepoError(data))
218 self._abort(error.RepoError(data))
219
219
220 @batchable
220 @batchable
221 def heads(self):
221 def heads(self):
222 f = future()
222 f = future()
223 yield {}, f
223 yield {}, f
224 d = f.value
224 d = f.value
225 try:
225 try:
226 yield decodelist(d[:-1])
226 yield decodelist(d[:-1])
227 except ValueError:
227 except ValueError:
228 self._abort(error.ResponseError(_("unexpected response:"), d))
228 self._abort(error.ResponseError(_("unexpected response:"), d))
229
229
230 @batchable
230 @batchable
231 def known(self, nodes):
231 def known(self, nodes):
232 f = future()
232 f = future()
233 yield {'nodes': encodelist(nodes)}, f
233 yield {'nodes': encodelist(nodes)}, f
234 d = f.value
234 d = f.value
235 try:
235 try:
236 yield [bool(int(f)) for f in d]
236 yield [bool(int(f)) for f in d]
237 except ValueError:
237 except ValueError:
238 self._abort(error.ResponseError(_("unexpected response:"), d))
238 self._abort(error.ResponseError(_("unexpected response:"), d))
239
239
240 @batchable
240 @batchable
241 def branchmap(self):
241 def branchmap(self):
242 f = future()
242 f = future()
243 yield {}, f
243 yield {}, f
244 d = f.value
244 d = f.value
245 try:
245 try:
246 branchmap = {}
246 branchmap = {}
247 for branchpart in d.splitlines():
247 for branchpart in d.splitlines():
248 branchname, branchheads = branchpart.split(' ', 1)
248 branchname, branchheads = branchpart.split(' ', 1)
249 branchname = encoding.tolocal(urllib.unquote(branchname))
249 branchname = encoding.tolocal(urllib.unquote(branchname))
250 branchheads = decodelist(branchheads)
250 branchheads = decodelist(branchheads)
251 branchmap[branchname] = branchheads
251 branchmap[branchname] = branchheads
252 yield branchmap
252 yield branchmap
253 except TypeError:
253 except TypeError:
254 self._abort(error.ResponseError(_("unexpected response:"), d))
254 self._abort(error.ResponseError(_("unexpected response:"), d))
255
255
256 def branches(self, nodes):
256 def branches(self, nodes):
257 n = encodelist(nodes)
257 n = encodelist(nodes)
258 d = self._call("branches", nodes=n)
258 d = self._call("branches", nodes=n)
259 try:
259 try:
260 br = [tuple(decodelist(b)) for b in d.splitlines()]
260 br = [tuple(decodelist(b)) for b in d.splitlines()]
261 return br
261 return br
262 except ValueError:
262 except ValueError:
263 self._abort(error.ResponseError(_("unexpected response:"), d))
263 self._abort(error.ResponseError(_("unexpected response:"), d))
264
264
265 def between(self, pairs):
265 def between(self, pairs):
266 batch = 8 # avoid giant requests
266 batch = 8 # avoid giant requests
267 r = []
267 r = []
268 for i in xrange(0, len(pairs), batch):
268 for i in xrange(0, len(pairs), batch):
269 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
269 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
270 d = self._call("between", pairs=n)
270 d = self._call("between", pairs=n)
271 try:
271 try:
272 r.extend(l and decodelist(l) or [] for l in d.splitlines())
272 r.extend(l and decodelist(l) or [] for l in d.splitlines())
273 except ValueError:
273 except ValueError:
274 self._abort(error.ResponseError(_("unexpected response:"), d))
274 self._abort(error.ResponseError(_("unexpected response:"), d))
275 return r
275 return r
276
276
277 @batchable
277 @batchable
278 def pushkey(self, namespace, key, old, new):
278 def pushkey(self, namespace, key, old, new):
279 if not self.capable('pushkey'):
279 if not self.capable('pushkey'):
280 yield False, None
280 yield False, None
281 f = future()
281 f = future()
282 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
282 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
283 yield {'namespace': encoding.fromlocal(namespace),
283 yield {'namespace': encoding.fromlocal(namespace),
284 'key': encoding.fromlocal(key),
284 'key': encoding.fromlocal(key),
285 'old': encoding.fromlocal(old),
285 'old': encoding.fromlocal(old),
286 'new': encoding.fromlocal(new)}, f
286 'new': encoding.fromlocal(new)}, f
287 d = f.value
287 d = f.value
288 d, output = d.split('\n', 1)
288 d, output = d.split('\n', 1)
289 try:
289 try:
290 d = bool(int(d))
290 d = bool(int(d))
291 except ValueError:
291 except ValueError:
292 raise error.ResponseError(
292 raise error.ResponseError(
293 _('push failed (unexpected response):'), d)
293 _('push failed (unexpected response):'), d)
294 for l in output.splitlines(True):
294 for l in output.splitlines(True):
295 self.ui.status(_('remote: '), l)
295 self.ui.status(_('remote: '), l)
296 yield d
296 yield d
297
297
298 @batchable
298 @batchable
299 def listkeys(self, namespace):
299 def listkeys(self, namespace):
300 if not self.capable('pushkey'):
300 if not self.capable('pushkey'):
301 yield {}, None
301 yield {}, None
302 f = future()
302 f = future()
303 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
303 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
304 yield {'namespace': encoding.fromlocal(namespace)}, f
304 yield {'namespace': encoding.fromlocal(namespace)}, f
305 d = f.value
305 d = f.value
306 r = {}
306 r = {}
307 for l in d.splitlines():
307 for l in d.splitlines():
308 k, v = l.split('\t')
308 k, v = l.split('\t')
309 r[encoding.tolocal(k)] = encoding.tolocal(v)
309 r[encoding.tolocal(k)] = encoding.tolocal(v)
310 yield r
310 yield r
311
311
312 def stream_out(self):
312 def stream_out(self):
313 return self._callstream('stream_out')
313 return self._callstream('stream_out')
314
314
315 def changegroup(self, nodes, kind):
315 def changegroup(self, nodes, kind):
316 n = encodelist(nodes)
316 n = encodelist(nodes)
317 f = self._callcompressable("changegroup", roots=n)
317 f = self._callcompressable("changegroup", roots=n)
318 return changegroupmod.unbundle10(f, 'UN')
318 return changegroupmod.unbundle10(f, 'UN')
319
319
320 def changegroupsubset(self, bases, heads, kind):
320 def changegroupsubset(self, bases, heads, kind):
321 self.requirecap('changegroupsubset', _('look up remote changes'))
321 self.requirecap('changegroupsubset', _('look up remote changes'))
322 bases = encodelist(bases)
322 bases = encodelist(bases)
323 heads = encodelist(heads)
323 heads = encodelist(heads)
324 f = self._callcompressable("changegroupsubset",
324 f = self._callcompressable("changegroupsubset",
325 bases=bases, heads=heads)
325 bases=bases, heads=heads)
326 return changegroupmod.unbundle10(f, 'UN')
326 return changegroupmod.unbundle10(f, 'UN')
327
327
328 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
328 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
329 self.requirecap('getbundle', _('look up remote changes'))
329 self.requirecap('getbundle', _('look up remote changes'))
330 opts = {}
330 opts = {}
331 if heads is not None:
331 if heads is not None:
332 opts['heads'] = encodelist(heads)
332 opts['heads'] = encodelist(heads)
333 if common is not None:
333 if common is not None:
334 opts['common'] = encodelist(common)
334 opts['common'] = encodelist(common)
335 if bundlecaps is not None:
335 if bundlecaps is not None:
336 opts['bundlecaps'] = ','.join(bundlecaps)
336 opts['bundlecaps'] = ','.join(bundlecaps)
337 f = self._callcompressable("getbundle", **opts)
337 f = self._callcompressable("getbundle", **opts)
338 if bundlecaps is not None and 'HG20' in bundlecaps:
338 if bundlecaps is not None and 'HG20' in bundlecaps:
339 return bundle2.unbundle20(self.ui, f)
339 return bundle2.unbundle20(self.ui, f)
340 else:
340 else:
341 return changegroupmod.unbundle10(f, 'UN')
341 return changegroupmod.unbundle10(f, 'UN')
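# Note on the branch above (capability name from this series): only a
# client that put 'HG20' into bundlecaps gets its reply parsed as a
# bundle2 stream; in the plain case the reply goes to unbundle10 with
# 'UN' because _callcompressable has already handled decompression.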
342
342
343 def unbundle(self, cg, heads, source):
343 def unbundle(self, cg, heads, source):
344 '''Send cg (a readable file-like object representing the
344 '''Send cg (a readable file-like object representing the
345 changegroup to push, typically a chunkbuffer object) to the
345 changegroup to push, typically a chunkbuffer object) to the
346 remote server as a bundle.
346 remote server as a bundle.
347
347
348 When pushing a bundle10 stream, return an integer indicating the
348 When pushing a bundle10 stream, return an integer indicating the
349 result of the push (see localrepository.addchangegroup()).
349 result of the push (see localrepository.addchangegroup()).
350
350
351 When pushing a bundle20 stream, return a bundle20 stream.'''
351 When pushing a bundle20 stream, return a bundle20 stream.'''
352
352
353 if heads != ['force'] and self.capable('unbundlehash'):
353 if heads != ['force'] and self.capable('unbundlehash'):
354 heads = encodelist(['hashed',
354 heads = encodelist(['hashed',
355 util.sha1(''.join(sorted(heads))).digest()])
355 util.sha1(''.join(sorted(heads))).digest()])
356 else:
356 else:
357 heads = encodelist(heads)
357 heads = encodelist(heads)
358
358
359 if util.safehasattr(cg, 'deltaheader'):
359 if util.safehasattr(cg, 'deltaheader'):
360 # this is a bundle10, do the old style call sequence
360 # this is a bundle10, do the old style call sequence
361 ret, output = self._callpush("unbundle", cg, heads=heads)
361 ret, output = self._callpush("unbundle", cg, heads=heads)
362 if ret == "":
362 if ret == "":
363 raise error.ResponseError(
363 raise error.ResponseError(
364 _('push failed:'), output)
364 _('push failed:'), output)
365 try:
365 try:
366 ret = int(ret)
366 ret = int(ret)
367 except ValueError:
367 except ValueError:
368 raise error.ResponseError(
368 raise error.ResponseError(
369 _('push failed (unexpected response):'), ret)
369 _('push failed (unexpected response):'), ret)
370
370
371 for l in output.splitlines(True):
371 for l in output.splitlines(True):
372 self.ui.status(_('remote: '), l)
372 self.ui.status(_('remote: '), l)
373 else:
373 else:
374 # bundle2 push. Send a stream, fetch a stream.
374 # bundle2 push. Send a stream, fetch a stream.
375 stream = self._calltwowaystream('unbundle', cg, heads=heads)
375 stream = self._calltwowaystream('unbundle', cg, heads=heads)
376 ret = bundle2.unbundle20(self.ui, stream)
376 ret = bundle2.unbundle20(self.ui, stream)
377 return ret
377 return ret
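# Note on the 'hashed' shortcut above (values illustrative): instead of
# listing every head, the client sends
# encodelist(['hashed', sha1-digest]); encodelist hex-encodes each
# token, so the server sees '686173686564 <40-hex digest>' and can
# compare the digest against a hash of its own heads to detect a push
# race cheaply.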
378
378
379 def debugwireargs(self, one, two, three=None, four=None, five=None):
379 def debugwireargs(self, one, two, three=None, four=None, five=None):
380 # don't pass optional arguments left at their default value
380 # don't pass optional arguments left at their default value
381 opts = {}
381 opts = {}
382 if three is not None:
382 if three is not None:
383 opts['three'] = three
383 opts['three'] = three
384 if four is not None:
384 if four is not None:
385 opts['four'] = four
385 opts['four'] = four
386 return self._call('debugwireargs', one=one, two=two, **opts)
386 return self._call('debugwireargs', one=one, two=two, **opts)
387
387
388 def _call(self, cmd, **args):
388 def _call(self, cmd, **args):
389 """execute <cmd> on the server
389 """execute <cmd> on the server
390
390
391 The command is expected to return a simple string.
391 The command is expected to return a simple string.
392
392
393 returns the server reply as a string."""
393 returns the server reply as a string."""
394 raise NotImplementedError()
394 raise NotImplementedError()
395
395
396 def _callstream(self, cmd, **args):
396 def _callstream(self, cmd, **args):
397 """execute <cmd> on the server
397 """execute <cmd> on the server
398
398
399 The command is expected to return a stream.
399 The command is expected to return a stream.
400
400
401 returns the server reply as a file like object."""
401 returns the server reply as a file like object."""
402 raise NotImplementedError()
402 raise NotImplementedError()
403
403
404 def _callcompressable(self, cmd, **args):
404 def _callcompressable(self, cmd, **args):
405 """execute <cmd> on the server
405 """execute <cmd> on the server
406
406
407 The command is expected to return a stream.
407 The command is expected to return a stream.
408
408
409 The stream may have been compressed in some implementations. This
409 The stream may have been compressed in some implementations. This
410 function takes care of the decompression. This is the only difference
410 function takes care of the decompression. This is the only difference
411 with _callstream.
411 with _callstream.
412
412
413 returns the server reply as a file like object.
413 returns the server reply as a file like object.
414 """
414 """
415 raise NotImplementedError()
415 raise NotImplementedError()
416
416
417 def _callpush(self, cmd, fp, **args):
417 def _callpush(self, cmd, fp, **args):
418 """execute a <cmd> on server
418 """execute a <cmd> on server
419
419
420 The command is expected to be related to a push. Push has a special
420 The command is expected to be related to a push. Push has a special
421 return method.
421 return method.
422
422
423 returns the server reply as a (ret, output) tuple. ret is either
423 returns the server reply as a (ret, output) tuple. ret is either
424 empty (error) or a stringified int.
424 empty (error) or a stringified int.
425 """
425 """
426 raise NotImplementedError()
426 raise NotImplementedError()
427
427
428 def _calltwowaystream(self, cmd, fp, **args):
428 def _calltwowaystream(self, cmd, fp, **args):
429 """execute <cmd> on server
429 """execute <cmd> on server
430
430
431 The command will send a stream to the server and get a stream in reply.
431 The command will send a stream to the server and get a stream in reply.
432 """
432 """
433 raise NotImplementedError()
433 raise NotImplementedError()
434
434
435 def _abort(self, exception):
435 def _abort(self, exception):
436 """clearly abort the wire protocol connection and raise the exception
436 """clearly abort the wire protocol connection and raise the exception
437 """
437 """
438 raise NotImplementedError()
438 raise NotImplementedError()
439
439
440 # server side
440 # server side
441
441
442 # wire protocol command can either return a string or one of these classes.
442 # wire protocol command can either return a string or one of these classes.
443 class streamres(object):
443 class streamres(object):
444 """wireproto reply: binary stream
444 """wireproto reply: binary stream
445
445
446 The call was successful and the result is a stream.
446 The call was successful and the result is a stream.
447 Iterate on the `self.gen` attribute to retrieve chunks.
447 Iterate on the `self.gen` attribute to retrieve chunks.
448 """
448 """
449 def __init__(self, gen):
449 def __init__(self, gen):
450 self.gen = gen
450 self.gen = gen
451
451
452 class pushres(object):
452 class pushres(object):
453 """wireproto reply: success with simple integer return
453 """wireproto reply: success with simple integer return
454
454
455 The call was successful and returned an integer contained in `self.res`.
455 The call was successful and returned an integer contained in `self.res`.
456 """
456 """
457 def __init__(self, res):
457 def __init__(self, res):
458 self.res = res
458 self.res = res
459
459
460 class pusherr(object):
460 class pusherr(object):
461 """wireproto reply: failure
461 """wireproto reply: failure
462
462
463 The call failed. The `self.res` attribute contains the error message.
463 The call failed. The `self.res` attribute contains the error message.
464 """
464 """
465 def __init__(self, res):
465 def __init__(self, res):
466 self.res = res
466 self.res = res
467
467
468 class ooberror(object):
468 class ooberror(object):
469 """wireproto reply: failure of a batch of operation
469 """wireproto reply: failure of a batch of operation
470
470
471 Something failed during a batch call. The error message is stored in
471 Something failed during a batch call. The error message is stored in
472 `self.message`.
472 `self.message`.
473 """
473 """
474 def __init__(self, message):
474 def __init__(self, message):
475 self.message = message
475 self.message = message
476
476
477 def dispatch(repo, proto, command):
477 def dispatch(repo, proto, command):
478 repo = repo.filtered("served")
478 repo = repo.filtered("served")
479 func, spec = commands[command]
479 func, spec = commands[command]
480 args = proto.getargs(spec)
480 args = proto.getargs(spec)
481 return func(repo, proto, *args)
481 return func(repo, proto, *args)
482
482
483 def options(cmd, keys, others):
483 def options(cmd, keys, others):
484 opts = {}
484 opts = {}
485 for k in keys:
485 for k in keys:
486 if k in others:
486 if k in others:
487 opts[k] = others[k]
487 opts[k] = others[k]
488 del others[k]
488 del others[k]
489 if others:
489 if others:
490 sys.stderr.write("abort: %s got unexpected arguments %s\n"
490 sys.stderr.write("abort: %s got unexpected arguments %s\n"
491 % (cmd, ",".join(others)))
491 % (cmd, ",".join(others)))
492 return opts
492 return opts
493
493
494 # list of commands
494 # list of commands
495 commands = {}
495 commands = {}
496
496
497 def wireprotocommand(name, args=''):
497 def wireprotocommand(name, args=''):
498 """decorator for wire protocol command"""
498 """decorator for wire protocol command"""
499 def register(func):
499 def register(func):
500 commands[name] = (func, args)
500 commands[name] = (func, args)
501 return func
501 return func
502 return register
502 return register
503
503
504 @wireprotocommand('batch', 'cmds *')
504 @wireprotocommand('batch', 'cmds *')
505 def batch(repo, proto, cmds, others):
505 def batch(repo, proto, cmds, others):
506 repo = repo.filtered("served")
506 repo = repo.filtered("served")
507 res = []
507 res = []
508 for pair in cmds.split(';'):
508 for pair in cmds.split(';'):
509 op, args = pair.split(' ', 1)
509 op, args = pair.split(' ', 1)
510 vals = {}
510 vals = {}
511 for a in args.split(','):
511 for a in args.split(','):
512 if a:
512 if a:
513 n, v = a.split('=')
513 n, v = a.split('=')
514 vals[n] = unescapearg(v)
514 vals[n] = unescapearg(v)
515 func, spec = commands[op]
515 func, spec = commands[op]
516 if spec:
516 if spec:
517 keys = spec.split()
517 keys = spec.split()
518 data = {}
518 data = {}
519 for k in keys:
519 for k in keys:
520 if k == '*':
520 if k == '*':
521 star = {}
521 star = {}
522 for key in vals.keys():
522 for key in vals.keys():
523 if key not in keys:
523 if key not in keys:
524 star[key] = vals[key]
524 star[key] = vals[key]
525 data['*'] = star
525 data['*'] = star
526 else:
526 else:
527 data[k] = vals[k]
527 data[k] = vals[k]
528 result = func(repo, proto, *[data[k] for k in keys])
528 result = func(repo, proto, *[data[k] for k in keys])
529 else:
529 else:
530 result = func(repo, proto)
530 result = func(repo, proto)
531 if isinstance(result, ooberror):
531 if isinstance(result, ooberror):
532 return result
532 return result
533 res.append(escapearg(result))
533 res.append(escapearg(result))
534 return ';'.join(res)
534 return ';'.join(res)
535
535
536 @wireprotocommand('between', 'pairs')
536 @wireprotocommand('between', 'pairs')
537 def between(repo, proto, pairs):
537 def between(repo, proto, pairs):
538 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
538 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
539 r = []
539 r = []
540 for b in repo.between(pairs):
540 for b in repo.between(pairs):
541 r.append(encodelist(b) + "\n")
541 r.append(encodelist(b) + "\n")
542 return "".join(r)
542 return "".join(r)
543
543
544 @wireprotocommand('branchmap')
544 @wireprotocommand('branchmap')
545 def branchmap(repo, proto):
545 def branchmap(repo, proto):
546 branchmap = repo.branchmap()
546 branchmap = repo.branchmap()
547 heads = []
547 heads = []
548 for branch, nodes in branchmap.iteritems():
548 for branch, nodes in branchmap.iteritems():
549 branchname = urllib.quote(encoding.fromlocal(branch))
549 branchname = urllib.quote(encoding.fromlocal(branch))
550 branchnodes = encodelist(nodes)
550 branchnodes = encodelist(nodes)
551 heads.append('%s %s' % (branchname, branchnodes))
551 heads.append('%s %s' % (branchname, branchnodes))
552 return '\n'.join(heads)
552 return '\n'.join(heads)
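# Reply format sketch (branch names and nodes hypothetical): one line
# per branch, the name URL-quoted so spaces or newlines survive the
# line-oriented framing, followed by space-separated hex heads:
#
#     default 1f0dee641bb7258c56bd60e93edfa2405381c41e
#     my%20branch 37a1297eb21b3ef5c5d2ffac22121b0ecd66c600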
553
553
554 @wireprotocommand('branches', 'nodes')
554 @wireprotocommand('branches', 'nodes')
555 def branches(repo, proto, nodes):
555 def branches(repo, proto, nodes):
556 nodes = decodelist(nodes)
556 nodes = decodelist(nodes)
557 r = []
557 r = []
558 for b in repo.branches(nodes):
558 for b in repo.branches(nodes):
559 r.append(encodelist(b) + "\n")
559 r.append(encodelist(b) + "\n")
560 return "".join(r)
560 return "".join(r)
561
561
562
562
563 wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
563 wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
564 'known', 'getbundle', 'unbundlehash', 'batch']
564 'known', 'getbundle', 'unbundlehash', 'batch']
565
565
566 def _capabilities(repo, proto):
566 def _capabilities(repo, proto):
567 """return a list of capabilities for a repo
567 """return a list of capabilities for a repo
568
568
569 This function exists to allow extensions to easily wrap capabilities
569 This function exists to allow extensions to easily wrap capabilities
570 computation
570 computation
571
571
572 - returns a list: easy to alter
572 - returns a list: easy to alter
573 - changes done here will be propagated to both `capabilities` and `hello`
573 - changes done here will be propagated to both `capabilities` and `hello`
574 commands without any other action needed.
574 commands without any other action needed.
575 """
575 """
576 # copy to prevent modification of the global list
576 # copy to prevent modification of the global list
577 caps = list(wireprotocaps)
577 caps = list(wireprotocaps)
578 if _allowstream(repo.ui):
578 if _allowstream(repo.ui):
579 if repo.ui.configbool('server', 'preferuncompressed', False):
579 if repo.ui.configbool('server', 'preferuncompressed', False):
580 caps.append('stream-preferred')
580 caps.append('stream-preferred')
581 requiredformats = repo.requirements & repo.supportedformats
581 requiredformats = repo.requirements & repo.supportedformats
582 # if our local revlogs are just revlogv1, add 'stream' cap
582 # if our local revlogs are just revlogv1, add 'stream' cap
583 if not requiredformats - set(('revlogv1',)):
583 if not requiredformats - set(('revlogv1',)):
584 caps.append('stream')
584 caps.append('stream')
585 # otherwise, add 'streamreqs' detailing our local revlog format
585 # otherwise, add 'streamreqs' detailing our local revlog format
586 else:
586 else:
587 caps.append('streamreqs=%s' % ','.join(requiredformats))
587 caps.append('streamreqs=%s' % ','.join(requiredformats))
588 if repo.ui.configbool('server', 'bundle2', False):
588 if repo.ui.configbool('server', 'bundle2', False):
589 caps.append('bundle2')
589 capsblob = bundle2.encodecaps(repo.bundle2caps)
590 caps.append('bundle2=' + urllib.quote(capsblob))
590 caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
591 caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
591 caps.append('httpheader=1024')
592 caps.append('httpheader=1024')
592 return caps
593 return caps
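# Effect of the change above (blob contents illustrative): with
# server.bundle2 enabled, the capability now carries a value, e.g.
# 'bundle2=HG20%0A...', where the URL-quoted blob comes from
# bundle2.encodecaps() and a client can recover it with
# urllib.unquote() before parsing the advertised bundle2 capabilities.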
593
594
594 # If you are writing an extension and consider wrapping this function, wrap
595 # If you are writing an extension and consider wrapping this function, wrap
595 # `_capabilities` instead.
596 # `_capabilities` instead.
596 @wireprotocommand('capabilities')
597 @wireprotocommand('capabilities')
597 def capabilities(repo, proto):
598 def capabilities(repo, proto):
598 return ' '.join(_capabilities(repo, proto))
599 return ' '.join(_capabilities(repo, proto))
599
600
600 @wireprotocommand('changegroup', 'roots')
601 @wireprotocommand('changegroup', 'roots')
601 def changegroup(repo, proto, roots):
602 def changegroup(repo, proto, roots):
602 nodes = decodelist(roots)
603 nodes = decodelist(roots)
603 cg = changegroupmod.changegroup(repo, nodes, 'serve')
604 cg = changegroupmod.changegroup(repo, nodes, 'serve')
604 return streamres(proto.groupchunks(cg))
605 return streamres(proto.groupchunks(cg))
605
606
606 @wireprotocommand('changegroupsubset', 'bases heads')
607 @wireprotocommand('changegroupsubset', 'bases heads')
607 def changegroupsubset(repo, proto, bases, heads):
608 def changegroupsubset(repo, proto, bases, heads):
608 bases = decodelist(bases)
609 bases = decodelist(bases)
609 heads = decodelist(heads)
610 heads = decodelist(heads)
610 cg = changegroupmod.changegroupsubset(repo, bases, heads, 'serve')
611 cg = changegroupmod.changegroupsubset(repo, bases, heads, 'serve')
611 return streamres(proto.groupchunks(cg))
612 return streamres(proto.groupchunks(cg))
612
613
613 @wireprotocommand('debugwireargs', 'one two *')
614 @wireprotocommand('debugwireargs', 'one two *')
614 def debugwireargs(repo, proto, one, two, others):
615 def debugwireargs(repo, proto, one, two, others):
615 # only accept optional args from the known set
616 # only accept optional args from the known set
616 opts = options('debugwireargs', ['three', 'four'], others)
617 opts = options('debugwireargs', ['three', 'four'], others)
617 return repo.debugwireargs(one, two, **opts)
618 return repo.debugwireargs(one, two, **opts)
618
619
619 @wireprotocommand('getbundle', '*')
620 @wireprotocommand('getbundle', '*')
620 def getbundle(repo, proto, others):
621 def getbundle(repo, proto, others):
621 opts = options('getbundle', ['heads', 'common', 'bundlecaps'], others)
622 opts = options('getbundle', ['heads', 'common', 'bundlecaps'], others)
622 for k, v in opts.iteritems():
623 for k, v in opts.iteritems():
623 if k in ('heads', 'common'):
624 if k in ('heads', 'common'):
624 opts[k] = decodelist(v)
625 opts[k] = decodelist(v)
625 elif k == 'bundlecaps':
626 elif k == 'bundlecaps':
626 opts[k] = set(v.split(','))
627 opts[k] = set(v.split(','))
627 cg = exchange.getbundle(repo, 'serve', **opts)
628 cg = exchange.getbundle(repo, 'serve', **opts)
628 return streamres(proto.groupchunks(cg))
629 return streamres(proto.groupchunks(cg))
629
630
630 @wireprotocommand('heads')
631 @wireprotocommand('heads')
631 def heads(repo, proto):
632 def heads(repo, proto):
632 h = repo.heads()
633 h = repo.heads()
633 return encodelist(h) + "\n"
634 return encodelist(h) + "\n"
634
635
635 @wireprotocommand('hello')
636 @wireprotocommand('hello')
636 def hello(repo, proto):
637 def hello(repo, proto):
637 '''the hello command returns a set of lines describing various
638 '''the hello command returns a set of lines describing various
638 interesting things about the server, in an RFC822-like format.
639 interesting things about the server, in an RFC822-like format.
639 Currently the only one defined is "capabilities", which
640 Currently the only one defined is "capabilities", which
640 consists of a line in the form:
641 consists of a line in the form:
641
642
642 capabilities: space separated list of tokens
643 capabilities: space separated list of tokens
643 '''
644 '''
644 return "capabilities: %s\n" % (capabilities(repo, proto))
645 return "capabilities: %s\n" % (capabilities(repo, proto))
645
646
646 @wireprotocommand('listkeys', 'namespace')
647 @wireprotocommand('listkeys', 'namespace')
647 def listkeys(repo, proto, namespace):
648 def listkeys(repo, proto, namespace):
648 d = repo.listkeys(encoding.tolocal(namespace)).items()
649 d = repo.listkeys(encoding.tolocal(namespace)).items()
649 t = '\n'.join(['%s\t%s' % (encoding.fromlocal(k), encoding.fromlocal(v))
650 t = '\n'.join(['%s\t%s' % (encoding.fromlocal(k), encoding.fromlocal(v))
650 for k, v in d])
651 for k, v in d])
651 return t
652 return t
652
653
653 @wireprotocommand('lookup', 'key')
654 @wireprotocommand('lookup', 'key')
654 def lookup(repo, proto, key):
655 def lookup(repo, proto, key):
655 try:
656 try:
656 k = encoding.tolocal(key)
657 k = encoding.tolocal(key)
657 c = repo[k]
658 c = repo[k]
658 r = c.hex()
659 r = c.hex()
659 success = 1
660 success = 1
660 except Exception, inst:
661 except Exception, inst:
661 r = str(inst)
662 r = str(inst)
662 success = 0
663 success = 0
663 return "%s %s\n" % (success, r)
664 return "%s %s\n" % (success, r)
664
665
665 @wireprotocommand('known', 'nodes *')
666 @wireprotocommand('known', 'nodes *')
666 def known(repo, proto, nodes, others):
667 def known(repo, proto, nodes, others):
667 return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
668 return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
668
669
669 @wireprotocommand('pushkey', 'namespace key old new')
670 @wireprotocommand('pushkey', 'namespace key old new')
670 def pushkey(repo, proto, namespace, key, old, new):
671 def pushkey(repo, proto, namespace, key, old, new):
671 # compatibility with pre-1.8 clients which were accidentally
672 # compatibility with pre-1.8 clients which were accidentally
672 # sending raw binary nodes rather than utf-8-encoded hex
673 # sending raw binary nodes rather than utf-8-encoded hex
673 if len(new) == 20 and new.encode('string-escape') != new:
674 if len(new) == 20 and new.encode('string-escape') != new:
674 # looks like it could be a binary node
675 # looks like it could be a binary node
675 try:
676 try:
676 new.decode('utf-8')
677 new.decode('utf-8')
677 new = encoding.tolocal(new) # but cleanly decodes as UTF-8
678 new = encoding.tolocal(new) # but cleanly decodes as UTF-8
678 except UnicodeDecodeError:
679 except UnicodeDecodeError:
679 pass # binary, leave unmodified
680 pass # binary, leave unmodified
680 else:
681 else:
681 new = encoding.tolocal(new) # normal path
682 new = encoding.tolocal(new) # normal path
682
683
683 if util.safehasattr(proto, 'restore'):
684 if util.safehasattr(proto, 'restore'):
684
685
685 proto.redirect()
686 proto.redirect()
686
687
687 try:
688 try:
688 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
689 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
689 encoding.tolocal(old), new) or False
690 encoding.tolocal(old), new) or False
690 except util.Abort:
691 except util.Abort:
691 r = False
692 r = False
692
693
693 output = proto.restore()
694 output = proto.restore()
694
695
695 return '%s\n%s' % (int(r), output)
696 return '%s\n%s' % (int(r), output)
696
697
697 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
698 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
698 encoding.tolocal(old), new)
699 encoding.tolocal(old), new)
699 return '%s\n' % int(r)
700 return '%s\n' % int(r)
700
701
701 def _allowstream(ui):
702 def _allowstream(ui):
702 return ui.configbool('server', 'uncompressed', True, untrusted=True)
703 return ui.configbool('server', 'uncompressed', True, untrusted=True)
703
704
704 def _walkstreamfiles(repo):
705 def _walkstreamfiles(repo):
705 # this is its own function so extensions can override it
706 # this is its own function so extensions can override it
706 return repo.store.walk()
707 return repo.store.walk()
707
708
708 @wireprotocommand('stream_out')
709 @wireprotocommand('stream_out')
709 def stream(repo, proto):
710 def stream(repo, proto):
710 '''If the server supports streaming clone, it advertises the "stream"
711 '''If the server supports streaming clone, it advertises the "stream"
711 capability with a value representing the version and flags of the repo
712 capability with a value representing the version and flags of the repo
712 it is serving. Client checks to see if it understands the format.
713 it is serving. Client checks to see if it understands the format.
713
714
714 The format is simple: the server writes out a line with the number
715 The format is simple: the server writes out a line with the number
715 of files, then the total number of bytes to be transferred (separated
716 of files, then the total number of bytes to be transferred (separated
716 by a space). Then, for each file, the server first writes the filename
717 by a space). Then, for each file, the server first writes the filename
717 and file size (separated by the null character), then the file contents.
718 and file size (separated by the null character), then the file contents.
718 '''
719 '''
719
720
720 if not _allowstream(repo.ui):
721 if not _allowstream(repo.ui):
721 return '1\n'
722 return '1\n'
722
723
723 entries = []
724 entries = []
724 total_bytes = 0
725 total_bytes = 0
725 try:
726 try:
726 # get consistent snapshot of repo, lock during scan
727 # get consistent snapshot of repo, lock during scan
727 lock = repo.lock()
728 lock = repo.lock()
728 try:
729 try:
729 repo.ui.debug('scanning\n')
730 repo.ui.debug('scanning\n')
730 for name, ename, size in _walkstreamfiles(repo):
731 for name, ename, size in _walkstreamfiles(repo):
731 if size:
732 if size:
732 entries.append((name, size))
733 entries.append((name, size))
733 total_bytes += size
734 total_bytes += size
734 finally:
735 finally:
735 lock.release()
736 lock.release()
736 except error.LockError:
737 except error.LockError:
737 return '2\n' # error: 2
738 return '2\n' # error: 2
738
739
739 def streamer(repo, entries, total):
740 def streamer(repo, entries, total):
740 '''stream out all metadata files in repository.'''
741 '''stream out all metadata files in repository.'''
741 yield '0\n' # success
742 yield '0\n' # success
742 repo.ui.debug('%d files, %d bytes to transfer\n' %
743 repo.ui.debug('%d files, %d bytes to transfer\n' %
743 (len(entries), total_bytes))
744 (len(entries), total_bytes))
744 yield '%d %d\n' % (len(entries), total_bytes)
745 yield '%d %d\n' % (len(entries), total_bytes)
745
746
746 sopener = repo.sopener
747 sopener = repo.sopener
747 oldaudit = sopener.mustaudit
748 oldaudit = sopener.mustaudit
748 debugflag = repo.ui.debugflag
749 debugflag = repo.ui.debugflag
749 sopener.mustaudit = False
750 sopener.mustaudit = False
750
751
751 try:
752 try:
752 for name, size in entries:
753 for name, size in entries:
753 if debugflag:
754 if debugflag:
754 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
755 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
755 # partially encode name over the wire for backwards compat
756 # partially encode name over the wire for backwards compat
756 yield '%s\0%d\n' % (store.encodedir(name), size)
757 yield '%s\0%d\n' % (store.encodedir(name), size)
757 if size <= 65536:
758 if size <= 65536:
758 fp = sopener(name)
759 fp = sopener(name)
759 try:
760 try:
760 data = fp.read(size)
761 data = fp.read(size)
761 finally:
762 finally:
762 fp.close()
763 fp.close()
763 yield data
764 yield data
764 else:
765 else:
765 for chunk in util.filechunkiter(sopener(name), limit=size):
766 for chunk in util.filechunkiter(sopener(name), limit=size):
766 yield chunk
767 yield chunk
767 # replace with "finally:" when support for python 2.4 has been dropped
768 # replace with "finally:" when support for python 2.4 has been dropped
768 except Exception:
769 except Exception:
769 sopener.mustaudit = oldaudit
770 sopener.mustaudit = oldaudit
770 raise
771 raise
771 sopener.mustaudit = oldaudit
772 sopener.mustaudit = oldaudit
772
773
773 return streamres(streamer(repo, entries, total_bytes))
774 return streamres(streamer(repo, entries, total_bytes))
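# Stream layout sketch (file names and sizes hypothetical):
#
#     0\n                        status line: success
#     2 8192\n                   2 files, 8192 bytes in total
#     data/foo.i\x004096\n       filename, NUL, size in bytes
#     <4096 bytes of raw file data>
#     data/bar.i\x004096\n
#     <4096 bytes of raw file data>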
774
775
775 @wireprotocommand('unbundle', 'heads')
776 @wireprotocommand('unbundle', 'heads')
776 def unbundle(repo, proto, heads):
777 def unbundle(repo, proto, heads):
777 their_heads = decodelist(heads)
778 their_heads = decodelist(heads)
778
779
779 try:
780 try:
780 proto.redirect()
781 proto.redirect()
781
782
782 exchange.check_heads(repo, their_heads, 'preparing changes')
783 exchange.check_heads(repo, their_heads, 'preparing changes')
783
784
784 # write bundle data to temporary file because it can be big
785 # write bundle data to temporary file because it can be big
785 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
786 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
786 fp = os.fdopen(fd, 'wb+')
787 fp = os.fdopen(fd, 'wb+')
787 r = 0
788 r = 0
788 try:
789 try:
789 proto.getfile(fp)
790 proto.getfile(fp)
790 fp.seek(0)
791 fp.seek(0)
791 gen = exchange.readbundle(repo.ui, fp, None)
792 gen = exchange.readbundle(repo.ui, fp, None)
792 r = exchange.unbundle(repo, gen, their_heads, 'serve',
793 r = exchange.unbundle(repo, gen, their_heads, 'serve',
793 proto._client())
794 proto._client())
794 if util.safehasattr(r, 'addpart'):
795 if util.safehasattr(r, 'addpart'):
795 # The return looks streamable, we are in the bundle2 case and
796 # The return looks streamable, we are in the bundle2 case and
796 # should return a stream.
797 # should return a stream.
797 return streamres(r.getchunks())
798 return streamres(r.getchunks())
798 return pushres(r)
799 return pushres(r)
799
800
800 finally:
801 finally:
801 fp.close()
802 fp.close()
802 os.unlink(tempname)
803 os.unlink(tempname)
803 except util.Abort, inst:
804 except util.Abort, inst:
804 # The old code we moved used sys.stderr directly.
805 # The old code we moved used sys.stderr directly.
805 # We did not change it to minimise code change.
806 # We did not change it to minimise code change.
806 # This needs to be moved to something proper.
807 # This needs to be moved to something proper.
807 # Feel free to do it.
808 # Feel free to do it.
808 sys.stderr.write("abort: %s\n" % inst)
809 sys.stderr.write("abort: %s\n" % inst)
809 return pushres(0)
810 return pushres(0)
810 except exchange.PushRaced, exc:
811 except exchange.PushRaced, exc:
811 return pusherr(str(exc))
812 return pusherr(str(exc))