bundle2: return a bundle20 object from exchanges.unbundle...
Pierre-Yves David
r21071:19b9f23a default
mercurial/exchange.py
@@ -1,707 +1,705 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
from node import hex, nullid
import errno
import util, scmutil, changegroup, base85
import discovery, phases, obsolete, bookmarks, bundle2

def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, alg)
    elif version == '20':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))

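The version dispatch above hinges entirely on the first four bytes of the stream: a two-byte magic ('HG') followed by a two-byte version. A minimal standalone sketch of that sniffing logic (illustrative only, not part of the patch):

def sniff_bundle_version(first4):
    # first4: the first four bytes of an incoming stream
    magic, version = first4[0:2], first4[2:4]
    if magic != 'HG':
        if first4.startswith('\0'):
            # headerless changegroup; readbundle fixes this up as HG10 + 'UN'
            return 'HG10'
        raise ValueError('not a Mercurial bundle')
    return magic + version

assert sniff_bundle_version('HG10') == 'HG10'
assert sniff_bundle_version('HG20') == 'HG20'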
class pushoperation(object):
    """An object that represents a single push operation

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # set of all heads common after changeset bundle push
        self.commonheads = None

def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _pushcheckoutgoing(pushop):
                pushop.repo.prepushoutgoinghooks(pushop.repo,
                                                 pushop.remote,
                                                 pushop.outgoing)
                if pushop.remote.capable('bundle2'):
                    _pushbundle2(pushop)
                else:
                    _pushchangeset(pushop)
            _pushcomputecommonheads(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    _pushbookmark(pushop)
    return pushop.ret

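For orientation, a sketch of driving this entry point from the Python API; the repository path and peer URL are placeholders, and the module spellings follow this era of Mercurial's internals:

from mercurial import ui as uimod, hg, exchange

u = uimod.ui()
repo = hg.repository(u, '/path/to/local/repo')         # hypothetical path
remote = hg.peer(u, {}, 'http://example.com/hg/repo')  # hypothetical URL
ret = exchange.push(repo, remote, force=False, revs=None, newbranch=False)
print 'push result:', ret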
def _pushdiscovery(pushop):
    # discovery
    unfi = pushop.repo.unfiltered()
    fci = discovery.findcommonincoming
    commoninc = fci(unfi, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are assigned here to stay within the 80-char limit
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are to push and there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missing heads will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True

def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    # Send known heads to the server for race detection.
    bundler = bundle2.bundle20(pushop.ui)
    if not pushop.force:
        part = bundle2.bundlepart('CHECK:HEADS', data=iter(pushop.remoteheads))
        bundler.addpart(part)
    # add the changegroup bundle
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundle2.bundlepart('CHANGEGROUP', data=cg.getchunks())
    bundler.addpart(cgpart)
    stream = util.chunkbuffer(bundler.getchunks())
    reply = pushop.remote.unbundle(stream, ['force'], 'push')
    try:
        op = bundle2.processbundle(pushop.repo, reply)
    except KeyError, exc:
        raise util.Abort('missing support for %s' % exc)
    cgreplies = op.records.getreplies(cgpart.id)
    assert len(cgreplies['changegroup']) == 1
    pushop.ret = cgreplies['changegroup'][0]['return']

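The part-based assembly in _pushbundle2 is the general bundle2 pattern: create a bundle20, add named parts, then serialize with getchunks(). A sketch using only the calls exercised above (ui and knownheads are assumed to already exist; the part name is the one this patch era uses):

from mercurial import bundle2, util

bundler = bundle2.bundle20(ui)
part = bundle2.bundlepart('CHECK:HEADS', data=iter(knownheads))
bundler.addpart(part)
# file-like object ready to hand to a peer's unbundle()
stream = util.chunkbuffer(bundler.getchunks())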
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            'push')
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())

def _pushcomputecommonheads(pushop):
    unfi = pushop.repo.unfiltered()
    if pushop.ret:
        # push succeeded, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # All-out push failed; synchronize all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    unfi = pushop.repo.unfiltered()
    cheads = pushop.commonheads
    if pushop.ret:
        # push succeeded, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # All-out push failed; synchronize all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # Get the list of all revs draft on remote but public here.
        # XXX Beware that the revset breaks if droots is not strictly
        # XXX roots; we may want to ensure it is, but that is costly
        outdated = unfi.set('heads((%ln::%ln) and public())',
                            droots, cheads)
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    if (obsolete._enabled and repo.obsstore and
        'obsolete' in remote.listkeys('namespaces')):
        rslts = []
        remotedata = repo.listkeys('obsolete')
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
     ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                           srchex=hex)

    for b, scid, dcid in advsrc:
        if ancestors and repo[scid].rev() not in ancestors:
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)

class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name of the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps remaining todo (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

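The three transaction helpers above implement a lazy-creation pattern: the transaction is only opened on first use, then committed or rolled back exactly once. A generic sketch of the same idea outside Mercurial (names are illustrative):

class lazytransaction(object):
    def __init__(self, factory):
        self._factory = factory  # callable that opens the real transaction
        self._tr = None

    def get(self):
        # open the transaction only when some step actually needs it
        if self._tr is None:
            self._tr = self._factory()
        return self._tr

    def close(self):
        # commit, but only if anything was opened
        if self._tr is not None:
            self._tr.close()

    def release(self):
        # abort unless already closed; safe to call unconditionally
        if self._tr is not None:
            self._tr.release()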
def pull(repo, remote, heads=None, force=False):
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if pullop.remote.capable('bundle2'):
            _pullbundle2(pullop)
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult

def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; it will be changed to handle
    all discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    pullop.common, pullop.fetch, pullop.rheads = tmp

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroups."""
    kwargs = {'bundlecaps': set(['HG20'])}
    # pulling changegroup
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        kwargs['common'] = pullop.common
        kwargs['heads'] = pullop.heads or pullop.rheads
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except KeyError, exc:
        raise util.Abort('missing support for %s' % exc)
    assert len(op.records['changegroup']) == 1
    pullop.cgresult = op.records['changegroup'][0]['return']

def _pullchangeset(pullop):
    """pull changesets from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing and don't break future useful rollback calls
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())

def _pullphase(pullop):
    # Get remote phases data from remote
    pullop.todosteps.remove('phases')
    remotephases = pullop.remote.listkeys('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        phases.advanceboundary(pullop.repo, phases.public, pheads)
        phases.advanceboundary(pullop.repo, phases.draft,
                               pullop.pulledsubset)
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        phases.advanceboundary(pullop.repo, phases.public,
                               pullop.pulledsubset)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the calling
    code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr

def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
    """return a full bundle (with potentially multiple kinds of parts)

    Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types become available for bundle2.

    This is different from changegroup.getbundle, which only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we want for querying different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # build bundle here.
    cg = changegroup.getbundle(repo, source, heads=heads,
                               common=common, bundlecaps=bundlecaps)
    if bundlecaps is None or 'HG20' not in bundlecaps:
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    bundler = bundle2.bundle20(repo.ui)
    part = bundle2.bundlepart('changegroup', data=cg.getchunks())
    bundler.addpart(part)
    return util.chunkbuffer(bundler.getchunks())

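A sketch of how a caller selects between the two wire formats through this helper; per the code above, the HG20 branch is taken only when 'HG20' appears in bundlecaps (repo is assumed to be an open repository object):

from mercurial import exchange

cg = exchange.getbundle(repo, 'pull')                                # HG10 changegroup object
stream = exchange.getbundle(repo, 'pull', bundlecaps=set(['HG20']))  # raw HG20 byte stream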
class PushRaced(RuntimeError):
    """An exception raised during unbundling that indicates a push race"""

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise PushRaced('repository changed while %s - '
                        'please try again' % context)

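The race check compares either the raw head list or a digest over the sorted heads. A standalone sketch of the hashed form, with hashlib standing in for util.sha1:

import hashlib

def heads_digest(heads):
    # heads: list of 20-byte binary node ids, as repo.heads() returns them
    return hashlib.sha1(''.join(sorted(heads))).digest()

# A client sends ['hashed', heads_digest(observed_heads)]; the server
# recomputes the digest over its current heads to detect a concurrent push.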
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            tr = repo.transaction('unbundle')
-            ret = bundle2.processbundle(repo, cg, lambda: tr)
+            r = bundle2.processbundle(repo, cg, lambda: tr).reply
            tr.close()
-            stream = util.chunkbuffer(ret.reply.getchunks())
-            r = bundle2.unbundle20(repo.ui, stream)
        else:
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
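With this change, unbundle can hand back a bundle2.bundle20 reply object instead of a plain integer. A sketch of how a caller can normalize the return value, mirroring the peer-side handling in the localrepo.py hunk below (repo, cg, heads and url are assumed to be in scope):

from mercurial import bundle2, util, exchange

ret = exchange.unbundle(repo, cg, heads, 'push', url)
if util.safehasattr(ret, 'getchunks'):
    # a bundle20 reply: re-serialize its chunks and parse them as an unbundler
    stream = util.chunkbuffer(ret.getchunks())
    ret = bundle2.unbundle20(repo.ui, stream)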
mercurial/localrepo.py
@@ -1,1899 +1,1906 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'bundle2', 'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10'):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make
            # the wire level function happier. We need to build a proper
            # object from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle to the repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except exchange.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), exc.message)
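
    # Illustrative sketch (not from this changeset) of the duck-typing above:
    # a bundle20 object exposes getchunks(), an iterator of byte chunks, and
    # util.chunkbuffer wraps any such iterator into a file-like object whose
    # read() the bundle2 parser can consume:
    #
    #     chunks = iter(['HG20', moredata])     # hypothetical chunk stream
    #     stream = util.chunkbuffer(chunks)     # file-like: stream.read(n)
    #     unbundler = bundle2.unbundle20(self.ui, stream)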

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence markers,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if not self.ui.configbool('server', 'bundle2', False):
            caps = set(caps)
            caps.discard('bundle2')
        return caps
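
    # Illustrative note: given the gating above, the 'bundle2' capability is
    # only advertised when explicitly enabled, e.g. in hgrc:
    #
    #     [server]
    #     bundle2 = True
    #
    # or, in a test, programmatically (sketch):
    #
    #     repo.ui.setconfig('server', 'bundle2', True)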

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
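
    # Illustrative sketch: proxycls mixes repoview (which hides the set of
    # filtered revisions) into the concrete repo class, so subclass behaviour
    # survives the wrapping. Filter names used in this file include 'served'
    # (what localpeer exposes) and 'visible' (see cancopy below):
    #
    #     served = repo.filtered('served')
    #     visible = repo.filtered('visible')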

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
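
    # Illustrative sketch: revset.formatspec safely splices arguments into
    # the expression ('%s' for strings, '%d' for ints, '%ld' for lists of
    # ints), so callers never build revset strings by hand; the branch name
    # below is hypothetical:
    #
    #     for rev in repo.revs('branch(%s) and head()', 'default'):
    #         pass
    #     for ctx in repo.set('%d::%d', 0, 5):
    #         pass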

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
               (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
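
    # Illustrative sketch (all values hypothetical): a global tag records the
    # name in .hgtags and commits that change, while local=True only writes
    # .hg/localtags and skips the commit:
    #
    #     node = repo['tip'].node()
    #     repo.tag('v1.0', node, 'Added tag v1.0', False,
    #              'test <test@example.com>', None)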

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result
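
    # Illustrative worked example: a node missing from the nodemap, or
    # present but in the secret phase, yields False; anything else yields
    # True (names hypothetical):
    #
    #     repo.known([publicnode, secretnode, unknownnode])
    #     # -> [True, False, False]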

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records; the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
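
    # Illustrative note: 'filter' here names an hgrc section, so the encode
    # and decode pattern lists below come from configuration such as the
    # following; prefixed forms like 'cleverencode:'/'cleverdecode:' dispatch
    # to filters registered via adddatafilter, while plain shell commands go
    # through util.filter:
    #
    #     [encode]
    #     **.txt = cleverencode:
    #
    #     [decode]
    #     **.txt = cleverdecode: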

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        def onclose():
            self.store.write(tr)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        self._transref = weakref.ref(tr)
        return tr
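
    # Illustrative sketch of the caller-side protocol for this era's
    # transaction API: close() commits the journal; release() without a
    # prior close() rolls the transaction back:
    #
    #     tr = repo.transaction('my-operation')   # description hypothetical
    #     try:
    #         pass                                # write to the store here
    #         tr.close()
    #     finally:
    #         tr.release()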

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
979
986
980 def invalidatecaches(self):
987 def invalidatecaches(self):
981
988
982 if '_tagscache' in vars(self):
989 if '_tagscache' in vars(self):
983 # can't use delattr on proxy
990 # can't use delattr on proxy
984 del self.__dict__['_tagscache']
991 del self.__dict__['_tagscache']
985
992
986 self.unfiltered()._branchcaches.clear()
993 self.unfiltered()._branchcaches.clear()
987 self.invalidatevolatilesets()
994 self.invalidatevolatilesets()
988
995
989 def invalidatevolatilesets(self):
996 def invalidatevolatilesets(self):
990 self.filteredrevcache.clear()
997 self.filteredrevcache.clear()
991 obsolete.clearobscaches(self)
998 obsolete.clearobscaches(self)
992
999
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

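    # A small configuration sketch: the timeout used above is read from the
    # [ui] section, so a user who would rather fail fast than wait the
    # 600-second default could set something like this (value illustrative):
    #
    #   [ui]
    #   timeout = 30
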
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

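    # A usage sketch, assuming 'repo' is a localrepository: if a lock is
    # currently held the callback is deferred until release, otherwise it
    # runs immediately, so callers don't need to know the lock state:
    #
    #   def announce():
    #       repo.ui.status('lock released\n')
    #   repo._afterlock(announce)
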
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

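    # A usage sketch, assuming 'repo' is a localrepository: callers pair
    # lock() with release() in a try/finally, as commitctx() below does:
    #
    #   l = repo.lock()
    #   try:
    #       pass # modify .hg/store here
    #   finally:
    #       l.release()
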
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestors(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

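    # For a rename, the metadata written above ends up with two entries,
    # roughly (values illustrative):
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40 hex digits>'}
    #
    # and fparent1 is forced to nullid, signalling "look up the copy data"
    # to anyone reading this filelog revision.
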
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in changes[:3]:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

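    # A minimal usage sketch, assuming 'repo' is a localrepository with
    # pending working-directory changes; commit() returns the new node, or
    # None when there is nothing to commit:
    #
    #   node = repo.commit(text='fix parser', user='jane@example.com')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
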
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

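    # In outline: commit() above gathers status into a workingctx and
    # delegates here; commitctx() then writes filelog revisions via
    # _filecommit(), a new manifest revision, and finally the changelog
    # entry, all inside a single "commit" transaction so that a failing
    # pretxncommit hook rolls everything back at once.
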
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

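    # A usage sketch, assuming 'repo' is a localrepository: the seven lists
    # unpack in this fixed order (the same order 'r' is built in above):
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, clean=True)
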
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

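    # For example, printing every head of the 'default' branch, including
    # closed ones (a sketch, assuming 'repo' is a localrepository):
    #
    #   for node in repo.branchheads('default', closed=True):
    #       repo.ui.write('%s\n' % hex(node))
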
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

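    # between() samples each top->bottom chain at exponentially growing
    # distances: with 'i' counting steps from top and 'f' doubling on each
    # hit, the nodes kept are those 1, 2, 4, 8, ... first-parent steps below
    # 'top'. For a linear history t -> a -> b -> c -> d -> e -> g -> h, the
    # list returned for the pair (t, h) would be [a, b, d].
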
    def pull(self, remote, heads=None, force=False):
        return exchange.pull(self, remote, heads, force)

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def push(self, remote, force=False, revs=None, newbranch=False):
        return exchange.push(self, remote, force, revs, newbranch)

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible; filters above
                    # 'served' are unlikely to be fetched from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

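    # The wire format consumed above, as implied by the reads: one status
    # line ('0' on success, '1'/'2' for errors), one '<filecount> <bytecount>'
    # line, then for each file a '<name>\0<size>' header line followed by
    # exactly <size> bytes of raw store data. An illustrative exchange:
    #
    #   0
    #   2 12345
    #   data/foo.i\06000      <... 6000 bytes ...>
    #   00manifest.i\06345    <... 6345 bytes ...>
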
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True