localrepo: introduce "prepushoutgoinghooks" to extend outgoing check easily...
FUJIWARA Katsunori
r21043:6c383c87 default
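The changeset adds a hook point to exchange.push(): once discovery confirms there is something to push, it calls pushop.repo.prepushoutgoinghooks(pushop.repo, pushop.remote, pushop.outgoing), so extensions can inspect or veto the outgoing changesets without wrapping push() itself. Below is a minimal sketch of how an extension might use the new hook point; the add() registration call is assumed to be the util.hooks-style registry this changeset introduces on localrepo (that part of the diff is truncated further down), and the 'mypolicy' name and WIP rule are purely illustrative assumptions:

    # hypothetical extension module (sketch, not part of this changeset)
    from mercurial import util
    from mercurial.i18n import _

    def checkoutgoing(repo, remote, outgoing):
        # called by exchange.push() with the discovery.outgoing object;
        # raising util.Abort here refuses the push before any data is sent
        for node in outgoing.missing:
            if 'WIP' in repo[node].description():
                raise util.Abort(_('refusing to push work-in-progress changesets'))

    def reposetup(ui, repo):
        if repo.local():
            repo.prepushoutgoinghooks.add('mypolicy', checkoutgoing)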
@@ -1,644 +1,647 @@
1 # exchange.py - utility to exchange data between repos.
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from i18n import _
9 from node import hex, nullid
10 import errno
11 import util, scmutil, changegroup, base85
12 import discovery, phases, obsolete, bookmarks, bundle2
13
14
15 class pushoperation(object):
16 """A object that represent a single push operation
17
18 It purpose is to carry push related state and very common operation.
19
20 A new should be created at the beginning of each push and discarded
21 afterward.
22 """
23
24 def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
25 # repo we push from
26 self.repo = repo
27 self.ui = repo.ui
28 # repo we push to
29 self.remote = remote
30 # force option provided
31 self.force = force
32 # revs to be pushed (None is "all")
33 self.revs = revs
34 # allow push of new branch
35 self.newbranch = newbranch
36 # did a local lock get acquired?
37 self.locallocked = None
38 # Integer version of the push result
39 # - None means nothing to push
40 # - 0 means HTTP error
41 # - 1 means we pushed and remote head count is unchanged *or*
42 # we have outgoing changesets but refused to push
43 # - other values as described by addchangegroup()
44 self.ret = None
45 # discover.outgoing object (contains common and outgoing data)
46 self.outgoing = None
47 # all remote heads before the push
48 self.remoteheads = None
49 # testable as a boolean indicating if any nodes are missing locally.
50 self.incoming = None
51 # set of all heads common after changeset bundle push
52 self.commonheads = None
53
54 def push(repo, remote, force=False, revs=None, newbranch=False):
55 '''Push outgoing changesets (limited by revs) from a local
56 repository to remote. Return an integer:
57 - None means nothing to push
58 - 0 means HTTP error
59 - 1 means we pushed and remote head count is unchanged *or*
60 we have outgoing changesets but refused to push
61 - other values as described by addchangegroup()
62 '''
63 pushop = pushoperation(repo, remote, force, revs, newbranch)
64 if pushop.remote.local():
65 missing = (set(pushop.repo.requirements)
66 - pushop.remote.local().supported)
67 if missing:
68 msg = _("required features are not"
69 " supported in the destination:"
70 " %s") % (', '.join(sorted(missing)))
71 raise util.Abort(msg)
72
73 # there are two ways to push to remote repo:
74 #
75 # addchangegroup assumes local user can lock remote
76 # repo (local filesystem, old ssh servers).
77 #
78 # unbundle assumes local user cannot lock remote repo (new ssh
79 # servers, http servers).
80
81 if not pushop.remote.canpush():
82 raise util.Abort(_("destination does not support push"))
83 # get local lock as we might write phase data
84 locallock = None
85 try:
86 locallock = pushop.repo.lock()
87 pushop.locallocked = True
88 except IOError, err:
89 pushop.locallocked = False
90 if err.errno != errno.EACCES:
91 raise
92 # source repo cannot be locked.
93 # We do not abort the push, but just disable the local phase
94 # synchronisation.
95 msg = 'cannot lock source repository: %s\n' % err
96 pushop.ui.debug(msg)
97 try:
98 pushop.repo.checkpush(pushop)
99 lock = None
100 unbundle = pushop.remote.capable('unbundle')
101 if not unbundle:
102 lock = pushop.remote.lock()
103 try:
104 _pushdiscovery(pushop)
105 if _pushcheckoutgoing(pushop):
106 pushop.repo.prepushoutgoinghooks(pushop.repo,
107 pushop.remote,
108 pushop.outgoing)
109 _pushchangeset(pushop)
110 _pushcomputecommonheads(pushop)
111 _pushsyncphase(pushop)
112 _pushobsolete(pushop)
113 finally:
114 if lock is not None:
115 lock.release()
116 finally:
117 if locallock is not None:
118 locallock.release()
119
120 _pushbookmark(pushop)
121 return pushop.ret
122
123 def _pushdiscovery(pushop):
124 # discovery
125 unfi = pushop.repo.unfiltered()
126 fci = discovery.findcommonincoming
127 commoninc = fci(unfi, pushop.remote, force=pushop.force)
128 common, inc, remoteheads = commoninc
129 fco = discovery.findcommonoutgoing
130 outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
131 commoninc=commoninc, force=pushop.force)
132 pushop.outgoing = outgoing
133 pushop.remoteheads = remoteheads
134 pushop.incoming = inc
135
136 def _pushcheckoutgoing(pushop):
137 outgoing = pushop.outgoing
138 unfi = pushop.repo.unfiltered()
139 if not outgoing.missing:
140 # nothing to push
141 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
142 return False
143 # something to push
144 if not pushop.force:
145 # if repo.obsstore == False --> no obsolete
146 # then, save the iteration
147 if unfi.obsstore:
148 # this message are here for 80 char limit reason
149 mso = _("push includes obsolete changeset: %s!")
150 mst = "push includes %s changeset: %s!"
151 # plain versions for i18n tool to detect them
152 _("push includes unstable changeset: %s!")
153 _("push includes bumped changeset: %s!")
154 _("push includes divergent changeset: %s!")
155 # If we are to push if there is at least one
156 # obsolete or unstable changeset in missing, at
157 # least one of the missinghead will be obsolete or
158 # unstable. So checking heads only is ok
159 for node in outgoing.missingheads:
160 ctx = unfi[node]
161 if ctx.obsolete():
162 raise util.Abort(mso % ctx)
163 elif ctx.troubled():
164 raise util.Abort(_(mst)
165 % (ctx.troubles()[0],
166 ctx))
167 newbm = pushop.ui.configlist('bookmarks', 'pushing')
168 discovery.checkheads(unfi, pushop.remote, outgoing,
169 pushop.remoteheads,
170 pushop.newbranch,
171 bool(pushop.incoming),
172 newbm)
173 return True
174
175 def _pushchangeset(pushop):
176 """Make the actual push of changeset bundle to remote repo"""
177 outgoing = pushop.outgoing
178 unbundle = pushop.remote.capable('unbundle')
179 # TODO: get bundlecaps from remote
180 bundlecaps = None
181 # create a changegroup from local
182 if pushop.revs is None and not (outgoing.excluded
183 or pushop.repo.changelog.filteredrevs):
184 # push everything,
185 # use the fast path, no race possible on push
186 bundler = changegroup.bundle10(pushop.repo, bundlecaps)
187 cg = changegroup.getsubset(pushop.repo,
188 outgoing,
189 bundler,
190 'push',
191 fastpath=True)
192 else:
193 cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
194 bundlecaps)
195
196 # apply changegroup to remote
197 if unbundle:
198 # local repo finds heads on server, finds out what
199 # revs it must push. once revs transferred, if server
200 # finds it has different heads (someone else won
201 # commit/push race), server aborts.
202 if pushop.force:
203 remoteheads = ['force']
204 else:
205 remoteheads = pushop.remoteheads
206 # ssh: return remote's addchangegroup()
207 # http: return remote's addchangegroup() or 0 for error
208 pushop.ret = pushop.remote.unbundle(cg, remoteheads,
209 'push')
210 else:
211 # we return an integer indicating remote head count
212 # change
213 pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
214
215 def _pushcomputecommonheads(pushop):
216 unfi = pushop.repo.unfiltered()
217 if pushop.ret:
218 # push succeed, synchronize target of the push
219 cheads = pushop.outgoing.missingheads
220 elif pushop.revs is None:
221 # All out push fails. synchronize all common
222 cheads = pushop.outgoing.commonheads
223 else:
224 # I want cheads = heads(::missingheads and ::commonheads)
225 # (missingheads is revs with secret changeset filtered out)
226 #
227 # This can be expressed as:
228 # cheads = ( (missingheads and ::commonheads)
229 # + (commonheads and ::missingheads))"
230 # )
231 #
232 # while trying to push we already computed the following:
233 # common = (::commonheads)
234 # missing = ((commonheads::missingheads) - commonheads)
235 #
236 # We can pick:
237 # * missingheads part of common (::commonheads)
238 common = set(pushop.outgoing.common)
239 nm = pushop.repo.changelog.nodemap
240 cheads = [node for node in pushop.revs if nm[node] in common]
241 # and
242 # * commonheads parents on missing
243 revset = unfi.set('%ln and parents(roots(%ln))',
244 pushop.outgoing.commonheads,
245 pushop.outgoing.missing)
246 cheads.extend(c.node() for c in revset)
247 pushop.commonheads = cheads
248
249 def _pushsyncphase(pushop):
250 """synchronise phase information locally and remotely"""
251 unfi = pushop.repo.unfiltered()
252 cheads = pushop.commonheads
253 if pushop.ret:
254 # push succeed, synchronize target of the push
255 cheads = pushop.outgoing.missingheads
256 elif pushop.revs is None:
257 # All out push fails. synchronize all common
258 cheads = pushop.outgoing.commonheads
259 else:
260 # I want cheads = heads(::missingheads and ::commonheads)
261 # (missingheads is revs with secret changeset filtered out)
262 #
263 # This can be expressed as:
264 # cheads = ( (missingheads and ::commonheads)
265 # + (commonheads and ::missingheads))"
266 # )
267 #
268 # while trying to push we already computed the following:
269 # common = (::commonheads)
270 # missing = ((commonheads::missingheads) - commonheads)
271 #
272 # We can pick:
273 # * missingheads part of common (::commonheads)
274 common = set(pushop.outgoing.common)
275 nm = pushop.repo.changelog.nodemap
276 cheads = [node for node in pushop.revs if nm[node] in common]
277 # and
278 # * commonheads parents on missing
279 revset = unfi.set('%ln and parents(roots(%ln))',
280 pushop.outgoing.commonheads,
281 pushop.outgoing.missing)
282 cheads.extend(c.node() for c in revset)
283 pushop.commonheads = cheads
284 # even when we don't push, exchanging phase data is useful
285 remotephases = pushop.remote.listkeys('phases')
286 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
287 and remotephases # server supports phases
288 and pushop.ret is None # nothing was pushed
289 and remotephases.get('publishing', False)):
290 # When:
291 # - this is a subrepo push
292 # - and remote support phase
293 # - and no changeset was pushed
294 # - and remote is publishing
295 # We may be in issue 3871 case!
296 # We drop the possible phase synchronisation done by
297 # courtesy to publish changesets possibly locally draft
298 # on the remote.
299 remotephases = {'publishing': 'True'}
300 if not remotephases: # old server or public only reply from non-publishing
301 _localphasemove(pushop, cheads)
302 # don't push any phase data as there is nothing to push
303 else:
304 ana = phases.analyzeremotephases(pushop.repo, cheads,
305 remotephases)
306 pheads, droots = ana
307 ### Apply remote phase on local
308 if remotephases.get('publishing', False):
309 _localphasemove(pushop, cheads)
310 else: # publish = False
311 _localphasemove(pushop, pheads)
312 _localphasemove(pushop, cheads, phases.draft)
313 ### Apply local phase on remote
314
315 # Get the list of all revs draft on remote by public here.
316 # XXX Beware that revset break if droots is not strictly
317 # XXX root we may want to ensure it is but it is costly
318 outdated = unfi.set('heads((%ln::%ln) and public())',
319 droots, cheads)
320 for newremotehead in outdated:
321 r = pushop.remote.pushkey('phases',
322 newremotehead.hex(),
323 str(phases.draft),
324 str(phases.public))
325 if not r:
326 pushop.ui.warn(_('updating %s to public failed!\n')
327 % newremotehead)
328
329 def _localphasemove(pushop, nodes, phase=phases.public):
330 """move <nodes> to <phase> in the local source repo"""
331 if pushop.locallocked:
332 phases.advanceboundary(pushop.repo, phase, nodes)
333 else:
334 # repo is not locked, do not change any phases!
335 # Informs the user that phases should have been moved when
336 # applicable.
337 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
338 phasestr = phases.phasenames[phase]
339 if actualmoves:
340 pushop.ui.status(_('cannot lock source repo, skipping '
341 'local %s phase update\n') % phasestr)
342
343 def _pushobsolete(pushop):
344 """utility function to push obsolete markers to a remote"""
345 pushop.ui.debug('try to push obsolete markers to remote\n')
346 repo = pushop.repo
347 remote = pushop.remote
348 if (obsolete._enabled and repo.obsstore and
349 'obsolete' in remote.listkeys('namespaces')):
350 rslts = []
351 remotedata = repo.listkeys('obsolete')
352 for key in sorted(remotedata, reverse=True):
353 # reverse sort to ensure we end with dump0
354 data = remotedata[key]
355 rslts.append(remote.pushkey('obsolete', key, '', data))
356 if [r for r in rslts if not r]:
357 msg = _('failed to push some obsolete markers!\n')
358 repo.ui.warn(msg)
359
360 def _pushbookmark(pushop):
361 """Update bookmark position on remote"""
362 ui = pushop.ui
363 repo = pushop.repo.unfiltered()
364 remote = pushop.remote
365 ui.debug("checking for updated bookmarks\n")
366 revnums = map(repo.changelog.rev, pushop.revs or [])
367 ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
368 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
369 ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
368
371
369 for b, scid, dcid in advsrc:
372 for b, scid, dcid in advsrc:
370 if ancestors and repo[scid].rev() not in ancestors:
373 if ancestors and repo[scid].rev() not in ancestors:
371 continue
374 continue
372 if remote.pushkey('bookmarks', b, dcid, scid):
375 if remote.pushkey('bookmarks', b, dcid, scid):
373 ui.status(_("updating bookmark %s\n") % b)
376 ui.status(_("updating bookmark %s\n") % b)
374 else:
377 else:
375 ui.warn(_('updating bookmark %s failed!\n') % b)
378 ui.warn(_('updating bookmark %s failed!\n') % b)
376
379
377 class pulloperation(object):
380 class pulloperation(object):
378 """A object that represent a single pull operation
381 """A object that represent a single pull operation
379
382
380 It purpose is to carry push related state and very common operation.
383 It purpose is to carry push related state and very common operation.
381
384
382 A new should be created at the beginning of each pull and discarded
385 A new should be created at the beginning of each pull and discarded
383 afterward.
386 afterward.
384 """
387 """
385
388
386 def __init__(self, repo, remote, heads=None, force=False):
389 def __init__(self, repo, remote, heads=None, force=False):
387 # repo we pull into
390 # repo we pull into
388 self.repo = repo
391 self.repo = repo
389 # repo we pull from
392 # repo we pull from
390 self.remote = remote
393 self.remote = remote
391 # revision we try to pull (None is "all")
394 # revision we try to pull (None is "all")
392 self.heads = heads
395 self.heads = heads
393 # do we force pull?
396 # do we force pull?
394 self.force = force
397 self.force = force
395 # the name the pull transaction
398 # the name the pull transaction
396 self._trname = 'pull\n' + util.hidepassword(remote.url())
399 self._trname = 'pull\n' + util.hidepassword(remote.url())
397 # hold the transaction once created
400 # hold the transaction once created
398 self._tr = None
401 self._tr = None
399 # set of common changeset between local and remote before pull
402 # set of common changeset between local and remote before pull
400 self.common = None
403 self.common = None
401 # set of pulled head
404 # set of pulled head
402 self.rheads = None
405 self.rheads = None
403 # list of missing changeset to fetch remotely
406 # list of missing changeset to fetch remotely
404 self.fetch = None
407 self.fetch = None
405 # result of changegroup pulling (used as return code by pull)
408 # result of changegroup pulling (used as return code by pull)
406 self.cgresult = None
409 self.cgresult = None
407 # list of step remaining todo (related to future bundle2 usage)
410 # list of step remaining todo (related to future bundle2 usage)
408 self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])
411 self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])
409
412
410 @util.propertycache
413 @util.propertycache
411 def pulledsubset(self):
414 def pulledsubset(self):
412 """heads of the set of changeset target by the pull"""
415 """heads of the set of changeset target by the pull"""
413 # compute target subset
416 # compute target subset
414 if self.heads is None:
417 if self.heads is None:
415 # We pulled every thing possible
418 # We pulled every thing possible
416 # sync on everything common
419 # sync on everything common
417 c = set(self.common)
420 c = set(self.common)
418 ret = list(self.common)
421 ret = list(self.common)
419 for n in self.rheads:
422 for n in self.rheads:
420 if n not in c:
423 if n not in c:
421 ret.append(n)
424 ret.append(n)
422 return ret
425 return ret
423 else:
426 else:
424 # We pulled a specific subset
427 # We pulled a specific subset
425 # sync on this subset
428 # sync on this subset
426 return self.heads
429 return self.heads
427
430
428 def gettransaction(self):
431 def gettransaction(self):
429 """get appropriate pull transaction, creating it if needed"""
432 """get appropriate pull transaction, creating it if needed"""
430 if self._tr is None:
433 if self._tr is None:
431 self._tr = self.repo.transaction(self._trname)
434 self._tr = self.repo.transaction(self._trname)
432 return self._tr
435 return self._tr
433
436
434 def closetransaction(self):
437 def closetransaction(self):
435 """close transaction if created"""
438 """close transaction if created"""
436 if self._tr is not None:
439 if self._tr is not None:
437 self._tr.close()
440 self._tr.close()
438
441
439 def releasetransaction(self):
442 def releasetransaction(self):
440 """release transaction if created"""
443 """release transaction if created"""
441 if self._tr is not None:
444 if self._tr is not None:
442 self._tr.release()
445 self._tr.release()
443
446
444 def pull(repo, remote, heads=None, force=False):
447 def pull(repo, remote, heads=None, force=False):
445 pullop = pulloperation(repo, remote, heads, force)
448 pullop = pulloperation(repo, remote, heads, force)
446 if pullop.remote.local():
449 if pullop.remote.local():
447 missing = set(pullop.remote.requirements) - pullop.repo.supported
450 missing = set(pullop.remote.requirements) - pullop.repo.supported
448 if missing:
451 if missing:
449 msg = _("required features are not"
452 msg = _("required features are not"
450 " supported in the destination:"
453 " supported in the destination:"
451 " %s") % (', '.join(sorted(missing)))
454 " %s") % (', '.join(sorted(missing)))
452 raise util.Abort(msg)
455 raise util.Abort(msg)
453
456
454 lock = pullop.repo.lock()
457 lock = pullop.repo.lock()
455 try:
458 try:
456 _pulldiscovery(pullop)
459 _pulldiscovery(pullop)
457 if pullop.remote.capable('bundle2'):
460 if pullop.remote.capable('bundle2'):
458 _pullbundle2(pullop)
461 _pullbundle2(pullop)
459 if 'changegroup' in pullop.todosteps:
462 if 'changegroup' in pullop.todosteps:
460 _pullchangeset(pullop)
463 _pullchangeset(pullop)
461 if 'phases' in pullop.todosteps:
464 if 'phases' in pullop.todosteps:
462 _pullphase(pullop)
465 _pullphase(pullop)
463 if 'obsmarkers' in pullop.todosteps:
466 if 'obsmarkers' in pullop.todosteps:
464 _pullobsolete(pullop)
467 _pullobsolete(pullop)
465 pullop.closetransaction()
468 pullop.closetransaction()
466 finally:
469 finally:
467 pullop.releasetransaction()
470 pullop.releasetransaction()
468 lock.release()
471 lock.release()
469
472
470 return pullop.cgresult
473 return pullop.cgresult
471
474
472 def _pulldiscovery(pullop):
475 def _pulldiscovery(pullop):
473 """discovery phase for the pull
476 """discovery phase for the pull
474
477
475 Current handle changeset discovery only, will change handle all discovery
478 Current handle changeset discovery only, will change handle all discovery
476 at some point."""
479 at some point."""
477 tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
480 tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
478 pullop.remote,
481 pullop.remote,
479 heads=pullop.heads,
482 heads=pullop.heads,
480 force=pullop.force)
483 force=pullop.force)
481 pullop.common, pullop.fetch, pullop.rheads = tmp
484 pullop.common, pullop.fetch, pullop.rheads = tmp
482
485
483 def _pullbundle2(pullop):
486 def _pullbundle2(pullop):
484 """pull data using bundle2
487 """pull data using bundle2
485
488
486 For now, the only supported data are changegroup."""
489 For now, the only supported data are changegroup."""
487 kwargs = {'bundlecaps': set(['HG20'])}
490 kwargs = {'bundlecaps': set(['HG20'])}
488 # pulling changegroup
491 # pulling changegroup
489 pullop.todosteps.remove('changegroup')
492 pullop.todosteps.remove('changegroup')
490 if not pullop.fetch:
493 if not pullop.fetch:
491 pullop.repo.ui.status(_("no changes found\n"))
494 pullop.repo.ui.status(_("no changes found\n"))
492 pullop.cgresult = 0
495 pullop.cgresult = 0
493 else:
496 else:
494 kwargs['common'] = pullop.common
497 kwargs['common'] = pullop.common
495 kwargs['heads'] = pullop.heads or pullop.rheads
498 kwargs['heads'] = pullop.heads or pullop.rheads
496 if pullop.heads is None and list(pullop.common) == [nullid]:
499 if pullop.heads is None and list(pullop.common) == [nullid]:
497 pullop.repo.ui.status(_("requesting all changes\n"))
500 pullop.repo.ui.status(_("requesting all changes\n"))
498 if kwargs.keys() == ['format']:
501 if kwargs.keys() == ['format']:
499 return # nothing to pull
502 return # nothing to pull
500 bundle = pullop.remote.getbundle('pull', **kwargs)
503 bundle = pullop.remote.getbundle('pull', **kwargs)
501 try:
504 try:
502 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
505 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
503 except KeyError, exc:
506 except KeyError, exc:
504 raise util.Abort('missing support for %s' % exc)
507 raise util.Abort('missing support for %s' % exc)
505 assert len(op.records['changegroup']) == 1
508 assert len(op.records['changegroup']) == 1
506 pullop.cgresult = op.records['changegroup'][0]['return']
509 pullop.cgresult = op.records['changegroup'][0]['return']
507
510
508 def _pullchangeset(pullop):
511 def _pullchangeset(pullop):
509 """pull changeset from unbundle into the local repo"""
512 """pull changeset from unbundle into the local repo"""
510 # We delay the open of the transaction as late as possible so we
513 # We delay the open of the transaction as late as possible so we
511 # don't open transaction for nothing or you break future useful
514 # don't open transaction for nothing or you break future useful
512 # rollback call
515 # rollback call
513 pullop.todosteps.remove('changegroup')
516 pullop.todosteps.remove('changegroup')
514 if not pullop.fetch:
517 if not pullop.fetch:
515 pullop.repo.ui.status(_("no changes found\n"))
518 pullop.repo.ui.status(_("no changes found\n"))
516 pullop.cgresult = 0
519 pullop.cgresult = 0
517 return
520 return
518 pullop.gettransaction()
521 pullop.gettransaction()
519 if pullop.heads is None and list(pullop.common) == [nullid]:
522 if pullop.heads is None and list(pullop.common) == [nullid]:
520 pullop.repo.ui.status(_("requesting all changes\n"))
523 pullop.repo.ui.status(_("requesting all changes\n"))
521 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
524 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
522 # issue1320, avoid a race if remote changed after discovery
525 # issue1320, avoid a race if remote changed after discovery
523 pullop.heads = pullop.rheads
526 pullop.heads = pullop.rheads
524
527
525 if pullop.remote.capable('getbundle'):
528 if pullop.remote.capable('getbundle'):
526 # TODO: get bundlecaps from remote
529 # TODO: get bundlecaps from remote
527 cg = pullop.remote.getbundle('pull', common=pullop.common,
530 cg = pullop.remote.getbundle('pull', common=pullop.common,
528 heads=pullop.heads or pullop.rheads)
531 heads=pullop.heads or pullop.rheads)
529 elif pullop.heads is None:
532 elif pullop.heads is None:
530 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
533 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
531 elif not pullop.remote.capable('changegroupsubset'):
534 elif not pullop.remote.capable('changegroupsubset'):
532 raise util.Abort(_("partial pull cannot be done because "
535 raise util.Abort(_("partial pull cannot be done because "
533 "other repository doesn't support "
536 "other repository doesn't support "
534 "changegroupsubset."))
537 "changegroupsubset."))
535 else:
538 else:
536 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
539 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
537 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
540 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
538 pullop.remote.url())
541 pullop.remote.url())
539
542
540 def _pullphase(pullop):
543 def _pullphase(pullop):
541 # Get remote phases data from remote
544 # Get remote phases data from remote
542 pullop.todosteps.remove('phases')
545 pullop.todosteps.remove('phases')
543 remotephases = pullop.remote.listkeys('phases')
546 remotephases = pullop.remote.listkeys('phases')
544 publishing = bool(remotephases.get('publishing', False))
547 publishing = bool(remotephases.get('publishing', False))
545 if remotephases and not publishing:
548 if remotephases and not publishing:
546 # remote is new and unpublishing
549 # remote is new and unpublishing
547 pheads, _dr = phases.analyzeremotephases(pullop.repo,
550 pheads, _dr = phases.analyzeremotephases(pullop.repo,
548 pullop.pulledsubset,
551 pullop.pulledsubset,
549 remotephases)
552 remotephases)
550 phases.advanceboundary(pullop.repo, phases.public, pheads)
553 phases.advanceboundary(pullop.repo, phases.public, pheads)
551 phases.advanceboundary(pullop.repo, phases.draft,
554 phases.advanceboundary(pullop.repo, phases.draft,
552 pullop.pulledsubset)
555 pullop.pulledsubset)
553 else:
556 else:
554 # Remote is old or publishing all common changesets
557 # Remote is old or publishing all common changesets
555 # should be seen as public
558 # should be seen as public
556 phases.advanceboundary(pullop.repo, phases.public,
559 phases.advanceboundary(pullop.repo, phases.public,
557 pullop.pulledsubset)
560 pullop.pulledsubset)
558
561
559 def _pullobsolete(pullop):
562 def _pullobsolete(pullop):
560 """utility function to pull obsolete markers from a remote
563 """utility function to pull obsolete markers from a remote
561
564
562 The `gettransaction` is function that return the pull transaction, creating
565 The `gettransaction` is function that return the pull transaction, creating
563 one if necessary. We return the transaction to inform the calling code that
566 one if necessary. We return the transaction to inform the calling code that
564 a new transaction have been created (when applicable).
567 a new transaction have been created (when applicable).
565
568
566 Exists mostly to allow overriding for experimentation purpose"""
569 Exists mostly to allow overriding for experimentation purpose"""
567 pullop.todosteps.remove('obsmarkers')
570 pullop.todosteps.remove('obsmarkers')
568 tr = None
571 tr = None
569 if obsolete._enabled:
572 if obsolete._enabled:
570 pullop.repo.ui.debug('fetching remote obsolete markers\n')
573 pullop.repo.ui.debug('fetching remote obsolete markers\n')
571 remoteobs = pullop.remote.listkeys('obsolete')
574 remoteobs = pullop.remote.listkeys('obsolete')
572 if 'dump0' in remoteobs:
575 if 'dump0' in remoteobs:
573 tr = pullop.gettransaction()
576 tr = pullop.gettransaction()
574 for key in sorted(remoteobs, reverse=True):
577 for key in sorted(remoteobs, reverse=True):
575 if key.startswith('dump'):
578 if key.startswith('dump'):
576 data = base85.b85decode(remoteobs[key])
579 data = base85.b85decode(remoteobs[key])
577 pullop.repo.obsstore.mergemarkers(tr, data)
580 pullop.repo.obsstore.mergemarkers(tr, data)
578 pullop.repo.invalidatevolatilesets()
581 pullop.repo.invalidatevolatilesets()
579 return tr
582 return tr
580
583
581 def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
584 def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
582 """return a full bundle (with potentially multiple kind of parts)
585 """return a full bundle (with potentially multiple kind of parts)
583
586
584 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
587 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
585 passed. For now, the bundle can contain only changegroup, but this will
588 passed. For now, the bundle can contain only changegroup, but this will
586 changes when more part type will be available for bundle2.
589 changes when more part type will be available for bundle2.
587
590
588 This is different from changegroup.getbundle that only returns an HG10
591 This is different from changegroup.getbundle that only returns an HG10
589 changegroup bundle. They may eventually get reunited in the future when we
592 changegroup bundle. They may eventually get reunited in the future when we
590 have a clearer idea of the API we what to query different data.
593 have a clearer idea of the API we what to query different data.
591
594
592 The implementation is at a very early stage and will get massive rework
595 The implementation is at a very early stage and will get massive rework
593 when the API of bundle is refined.
596 when the API of bundle is refined.
594 """
597 """
595 # build bundle here.
598 # build bundle here.
596 cg = changegroup.getbundle(repo, source, heads=heads,
599 cg = changegroup.getbundle(repo, source, heads=heads,
597 common=common, bundlecaps=bundlecaps)
600 common=common, bundlecaps=bundlecaps)
598 if bundlecaps is None or 'HG20' not in bundlecaps:
601 if bundlecaps is None or 'HG20' not in bundlecaps:
599 return cg
602 return cg
600 # very crude first implementation,
603 # very crude first implementation,
601 # the bundle API will change and the generation will be done lazily.
604 # the bundle API will change and the generation will be done lazily.
602 bundler = bundle2.bundle20(repo.ui)
605 bundler = bundle2.bundle20(repo.ui)
603 def cgchunks(cg=cg):
606 def cgchunks(cg=cg):
604 yield 'HG10UN'
607 yield 'HG10UN'
605 for c in cg.getchunks():
608 for c in cg.getchunks():
606 yield c
609 yield c
607 part = bundle2.bundlepart('changegroup', data=cgchunks())
610 part = bundle2.bundlepart('changegroup', data=cgchunks())
608 bundler.addpart(part)
611 bundler.addpart(part)
609 return bundle2.unbundle20(repo.ui, util.chunkbuffer(bundler.getchunks()))
612 return bundle2.unbundle20(repo.ui, util.chunkbuffer(bundler.getchunks()))
610
613
611 class PushRaced(RuntimeError):
614 class PushRaced(RuntimeError):
612 """An exception raised during unbundling that indicate a push race"""
615 """An exception raised during unbundling that indicate a push race"""
613
616
614 def check_heads(repo, their_heads, context):
617 def check_heads(repo, their_heads, context):
615 """check if the heads of a repo have been modified
618 """check if the heads of a repo have been modified
616
619
617 Used by peer for unbundling.
620 Used by peer for unbundling.
618 """
621 """
619 heads = repo.heads()
622 heads = repo.heads()
620 heads_hash = util.sha1(''.join(sorted(heads))).digest()
623 heads_hash = util.sha1(''.join(sorted(heads))).digest()
621 if not (their_heads == ['force'] or their_heads == heads or
624 if not (their_heads == ['force'] or their_heads == heads or
622 their_heads == ['hashed', heads_hash]):
625 their_heads == ['hashed', heads_hash]):
623 # someone else committed/pushed/unbundled while we
626 # someone else committed/pushed/unbundled while we
624 # were transferring data
627 # were transferring data
625 raise PushRaced('repository changed while %s - '
628 raise PushRaced('repository changed while %s - '
626 'please try again' % context)
629 'please try again' % context)
627
630
628 def unbundle(repo, cg, heads, source, url):
631 def unbundle(repo, cg, heads, source, url):
629 """Apply a bundle to a repo.
632 """Apply a bundle to a repo.
630
633
631 this function makes sure the repo is locked during the application and have
634 this function makes sure the repo is locked during the application and have
632 mechanism to check that no push race occurred between the creation of the
635 mechanism to check that no push race occurred between the creation of the
633 bundle and its application.
636 bundle and its application.
634
637
635 If the push was raced as PushRaced exception is raised."""
638 If the push was raced as PushRaced exception is raised."""
636 r = 0
639 r = 0
637 lock = repo.lock()
640 lock = repo.lock()
638 try:
641 try:
639 check_heads(repo, heads, 'uploading changes')
642 check_heads(repo, heads, 'uploading changes')
640 # push can proceed
643 # push can proceed
641 r = changegroup.addchangegroup(repo, cg, source, url)
644 r = changegroup.addchangegroup(repo, cg, source, url)
642 finally:
645 finally:
643 lock.release()
646 lock.release()
644 return r
647 return r
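The portion of localrepo.py shown below is truncated before the hunk that actually defines prepushoutgoinghooks. From the call site above, pushop.repo.prepushoutgoinghooks(pushop.repo, pushop.remote, pushop.outgoing), it has to be a callable registry of functions taking (repo, remote, outgoing). A minimal sketch of what such a container could look like; the class name, method signatures, and calling order are assumptions, not copied from the missing hunk:

    class hooks(object):
        # sketch of a callable hook registry, not the literal patch contents
        def __init__(self):
            self._hooks = []

        def add(self, source, hook):
            # 'source' names the extension registering the hook, so failures
            # can be attributed to it
            self._hooks.append((source, hook))

        def __call__(self, *args):
            # run every registered hook with the same arguments; any hook may
            # raise util.Abort to stop the operation
            for source, hook in self._hooks:
                hook(*args)

    # exchange.push() then invokes, as shown in the hunk above:
    #     repo.prepushoutgoinghooks(repo, remote, outgoing)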
@@ -1,1885 +1,1892 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import peer, changegroup, subrepo, pushkey, obsolete, repoview
9 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import lock as lockmod
11 import lock as lockmod
12 import transaction, store, encoding, exchange
12 import transaction, store, encoding, exchange
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 import branchmap, pathutil
19 import branchmap, pathutil
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21 filecache = scmutil.filecache
21 filecache = scmutil.filecache
22
22
23 class repofilecache(filecache):
23 class repofilecache(filecache):
24 """All filecache usage on repo are done for logic that should be unfiltered
24 """All filecache usage on repo are done for logic that should be unfiltered
25 """
25 """
26
26
27 def __get__(self, repo, type=None):
27 def __get__(self, repo, type=None):
28 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 return super(repofilecache, self).__get__(repo.unfiltered(), type)
29 def __set__(self, repo, value):
29 def __set__(self, repo, value):
30 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 return super(repofilecache, self).__set__(repo.unfiltered(), value)
31 def __delete__(self, repo):
31 def __delete__(self, repo):
32 return super(repofilecache, self).__delete__(repo.unfiltered())
32 return super(repofilecache, self).__delete__(repo.unfiltered())
33
33
34 class storecache(repofilecache):
34 class storecache(repofilecache):
35 """filecache for files in the store"""
35 """filecache for files in the store"""
36 def join(self, obj, fname):
36 def join(self, obj, fname):
37 return obj.sjoin(fname)
37 return obj.sjoin(fname)
38
38
39 class unfilteredpropertycache(propertycache):
39 class unfilteredpropertycache(propertycache):
40 """propertycache that apply to unfiltered repo only"""
40 """propertycache that apply to unfiltered repo only"""
41
41
42 def __get__(self, repo, type=None):
42 def __get__(self, repo, type=None):
43 unfi = repo.unfiltered()
43 unfi = repo.unfiltered()
44 if unfi is repo:
44 if unfi is repo:
45 return super(unfilteredpropertycache, self).__get__(unfi)
45 return super(unfilteredpropertycache, self).__get__(unfi)
46 return getattr(unfi, self.name)
46 return getattr(unfi, self.name)
47
47
48 class filteredpropertycache(propertycache):
48 class filteredpropertycache(propertycache):
49 """propertycache that must take filtering in account"""
49 """propertycache that must take filtering in account"""
50
50
51 def cachevalue(self, obj, value):
51 def cachevalue(self, obj, value):
52 object.__setattr__(obj, self.name, value)
52 object.__setattr__(obj, self.name, value)
53
53
54
54
55 def hasunfilteredcache(repo, name):
55 def hasunfilteredcache(repo, name):
56 """check if a repo has an unfilteredpropertycache value for <name>"""
56 """check if a repo has an unfilteredpropertycache value for <name>"""
57 return name in vars(repo.unfiltered())
57 return name in vars(repo.unfiltered())
58
58
59 def unfilteredmethod(orig):
59 def unfilteredmethod(orig):
60 """decorate method that always need to be run on unfiltered version"""
60 """decorate method that always need to be run on unfiltered version"""
61 def wrapper(repo, *args, **kwargs):
61 def wrapper(repo, *args, **kwargs):
62 return orig(repo.unfiltered(), *args, **kwargs)
62 return orig(repo.unfiltered(), *args, **kwargs)
63 return wrapper
63 return wrapper
64
64
65 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
65 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
66 'bundle2', 'unbundle'))
66 'bundle2', 'unbundle'))
67 legacycaps = moderncaps.union(set(['changegroupsubset']))
67 legacycaps = moderncaps.union(set(['changegroupsubset']))
68
68
69 class localpeer(peer.peerrepository):
69 class localpeer(peer.peerrepository):
70 '''peer for a local repo; reflects only the most recent API'''
70 '''peer for a local repo; reflects only the most recent API'''
71
71
72 def __init__(self, repo, caps=moderncaps):
72 def __init__(self, repo, caps=moderncaps):
73 peer.peerrepository.__init__(self)
73 peer.peerrepository.__init__(self)
74 self._repo = repo.filtered('served')
74 self._repo = repo.filtered('served')
75 self.ui = repo.ui
75 self.ui = repo.ui
76 self._caps = repo._restrictcapabilities(caps)
76 self._caps = repo._restrictcapabilities(caps)
77 self.requirements = repo.requirements
77 self.requirements = repo.requirements
78 self.supportedformats = repo.supportedformats
78 self.supportedformats = repo.supportedformats
79
79
80 def close(self):
80 def close(self):
81 self._repo.close()
81 self._repo.close()
82
82
83 def _capabilities(self):
83 def _capabilities(self):
84 return self._caps
84 return self._caps
85
85
86 def local(self):
86 def local(self):
87 return self._repo
87 return self._repo
88
88
89 def canpush(self):
89 def canpush(self):
90 return True
90 return True
91
91
92 def url(self):
92 def url(self):
93 return self._repo.url()
93 return self._repo.url()
94
94
95 def lookup(self, key):
95 def lookup(self, key):
96 return self._repo.lookup(key)
96 return self._repo.lookup(key)
97
97
98 def branchmap(self):
98 def branchmap(self):
99 return self._repo.branchmap()
99 return self._repo.branchmap()
100
100
101 def heads(self):
101 def heads(self):
102 return self._repo.heads()
102 return self._repo.heads()
103
103
104 def known(self, nodes):
104 def known(self, nodes):
105 return self._repo.known(nodes)
105 return self._repo.known(nodes)
106
106
107 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
107 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
108 format='HG10'):
108 format='HG10'):
109 return exchange.getbundle(self._repo, source, heads=heads,
109 return exchange.getbundle(self._repo, source, heads=heads,
110 common=common, bundlecaps=bundlecaps)
110 common=common, bundlecaps=bundlecaps)
111
111
112 # TODO We might want to move the next two calls into legacypeer and add
112 # TODO We might want to move the next two calls into legacypeer and add
113 # unbundle instead.
113 # unbundle instead.
114
114
115 def unbundle(self, cg, heads, url):
115 def unbundle(self, cg, heads, url):
116 """apply a bundle on a repo
116 """apply a bundle on a repo
117
117
118 This function handles the repo locking itself."""
118 This function handles the repo locking itself."""
119 try:
119 try:
120 return exchange.unbundle(self._repo, cg, heads, 'push', url)
120 return exchange.unbundle(self._repo, cg, heads, 'push', url)
121 except exchange.PushRaced, exc:
121 except exchange.PushRaced, exc:
122 raise error.ResponseError(_('push failed:'), exc.message)
122 raise error.ResponseError(_('push failed:'), exc.message)
123
123
124 def lock(self):
124 def lock(self):
125 return self._repo.lock()
125 return self._repo.lock()
126
126
127 def addchangegroup(self, cg, source, url):
127 def addchangegroup(self, cg, source, url):
128 return changegroup.addchangegroup(self._repo, cg, source, url)
128 return changegroup.addchangegroup(self._repo, cg, source, url)
129
129
130 def pushkey(self, namespace, key, old, new):
130 def pushkey(self, namespace, key, old, new):
131 return self._repo.pushkey(namespace, key, old, new)
131 return self._repo.pushkey(namespace, key, old, new)
132
132
133 def listkeys(self, namespace):
133 def listkeys(self, namespace):
134 return self._repo.listkeys(namespace)
134 return self._repo.listkeys(namespace)
135
135
136 def debugwireargs(self, one, two, three=None, four=None, five=None):
136 def debugwireargs(self, one, two, three=None, four=None, five=None):
137 '''used to test argument passing over the wire'''
137 '''used to test argument passing over the wire'''
138 return "%s %s %s %s %s" % (one, two, three, four, five)
138 return "%s %s %s %s %s" % (one, two, three, four, five)
139
139
140 class locallegacypeer(localpeer):
140 class locallegacypeer(localpeer):
141 '''peer extension which implements legacy methods too; used for tests with
141 '''peer extension which implements legacy methods too; used for tests with
142 restricted capabilities'''
142 restricted capabilities'''
143
143
144 def __init__(self, repo):
144 def __init__(self, repo):
145 localpeer.__init__(self, repo, caps=legacycaps)
145 localpeer.__init__(self, repo, caps=legacycaps)
146
146
147 def branches(self, nodes):
147 def branches(self, nodes):
148 return self._repo.branches(nodes)
148 return self._repo.branches(nodes)
149
149
150 def between(self, pairs):
150 def between(self, pairs):
151 return self._repo.between(pairs)
151 return self._repo.between(pairs)
152
152
153 def changegroup(self, basenodes, source):
153 def changegroup(self, basenodes, source):
154 return changegroup.changegroup(self._repo, basenodes, source)
154 return changegroup.changegroup(self._repo, basenodes, source)
155
155
156 def changegroupsubset(self, bases, heads, source):
156 def changegroupsubset(self, bases, heads, source):
157 return changegroup.changegroupsubset(self._repo, bases, heads, source)
157 return changegroup.changegroupsubset(self._repo, bases, heads, source)
158
158
159 class localrepository(object):
159 class localrepository(object):
160
160
161 supportedformats = set(('revlogv1', 'generaldelta'))
161 supportedformats = set(('revlogv1', 'generaldelta'))
162 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
162 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
163 'dotencode'))
163 'dotencode'))
164 openerreqs = set(('revlogv1', 'generaldelta'))
164 openerreqs = set(('revlogv1', 'generaldelta'))
165 requirements = ['revlogv1']
165 requirements = ['revlogv1']
166 filtername = None
166 filtername = None
167
167
168 # a list of (ui, featureset) functions.
169 # only functions defined in modules of enabled extensions are invoked
170 featuresetupfuncs = set()
170 featuresetupfuncs = set()
171
171
172 def _baserequirements(self, create):
172 def _baserequirements(self, create):
173 return self.requirements[:]
173 return self.requirements[:]
174
174
175 def __init__(self, baseui, path=None, create=False):
175 def __init__(self, baseui, path=None, create=False):
176 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
176 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
177 self.wopener = self.wvfs
177 self.wopener = self.wvfs
178 self.root = self.wvfs.base
178 self.root = self.wvfs.base
179 self.path = self.wvfs.join(".hg")
179 self.path = self.wvfs.join(".hg")
180 self.origroot = path
180 self.origroot = path
181 self.auditor = pathutil.pathauditor(self.root, self._checknested)
181 self.auditor = pathutil.pathauditor(self.root, self._checknested)
182 self.vfs = scmutil.vfs(self.path)
182 self.vfs = scmutil.vfs(self.path)
183 self.opener = self.vfs
183 self.opener = self.vfs
184 self.baseui = baseui
184 self.baseui = baseui
185 self.ui = baseui.copy()
185 self.ui = baseui.copy()
186 self.ui.copy = baseui.copy # prevent copying repo configuration
186 self.ui.copy = baseui.copy # prevent copying repo configuration
187 # A list of callbacks to shape the phase if no data were found.
188 # Callbacks are in the form: func(repo, roots) --> processed root.
189 # This list is to be filled by extensions during repo setup.
190 self._phasedefaults = []
190 self._phasedefaults = []
191 try:
191 try:
192 self.ui.readconfig(self.join("hgrc"), self.root)
192 self.ui.readconfig(self.join("hgrc"), self.root)
193 extensions.loadall(self.ui)
193 extensions.loadall(self.ui)
194 except IOError:
194 except IOError:
195 pass
195 pass
196
196
197 if self.featuresetupfuncs:
197 if self.featuresetupfuncs:
198 self.supported = set(self._basesupported) # use private copy
198 self.supported = set(self._basesupported) # use private copy
199 extmods = set(m.__name__ for n, m
199 extmods = set(m.__name__ for n, m
200 in extensions.extensions(self.ui))
200 in extensions.extensions(self.ui))
201 for setupfunc in self.featuresetupfuncs:
201 for setupfunc in self.featuresetupfuncs:
202 if setupfunc.__module__ in extmods:
202 if setupfunc.__module__ in extmods:
203 setupfunc(self.ui, self.supported)
203 setupfunc(self.ui, self.supported)
204 else:
204 else:
205 self.supported = self._basesupported
205 self.supported = self._basesupported
206
206
207 if not self.vfs.isdir():
207 if not self.vfs.isdir():
208 if create:
208 if create:
209 if not self.wvfs.exists():
209 if not self.wvfs.exists():
210 self.wvfs.makedirs()
210 self.wvfs.makedirs()
211 self.vfs.makedir(notindexed=True)
211 self.vfs.makedir(notindexed=True)
212 requirements = self._baserequirements(create)
212 requirements = self._baserequirements(create)
213 if self.ui.configbool('format', 'usestore', True):
213 if self.ui.configbool('format', 'usestore', True):
214 self.vfs.mkdir("store")
214 self.vfs.mkdir("store")
215 requirements.append("store")
215 requirements.append("store")
216 if self.ui.configbool('format', 'usefncache', True):
216 if self.ui.configbool('format', 'usefncache', True):
217 requirements.append("fncache")
217 requirements.append("fncache")
218 if self.ui.configbool('format', 'dotencode', True):
218 if self.ui.configbool('format', 'dotencode', True):
219 requirements.append('dotencode')
219 requirements.append('dotencode')
220 # create an invalid changelog
220 # create an invalid changelog
221 self.vfs.append(
221 self.vfs.append(
222 "00changelog.i",
222 "00changelog.i",
223 '\0\0\0\2' # represents revlogv2
223 '\0\0\0\2' # represents revlogv2
224 ' dummy changelog to prevent using the old repo layout'
224 ' dummy changelog to prevent using the old repo layout'
225 )
225 )
226 if self.ui.configbool('format', 'generaldelta', False):
226 if self.ui.configbool('format', 'generaldelta', False):
227 requirements.append("generaldelta")
227 requirements.append("generaldelta")
228 requirements = set(requirements)
228 requirements = set(requirements)
229 else:
229 else:
230 raise error.RepoError(_("repository %s not found") % path)
230 raise error.RepoError(_("repository %s not found") % path)
231 elif create:
231 elif create:
232 raise error.RepoError(_("repository %s already exists") % path)
232 raise error.RepoError(_("repository %s already exists") % path)
233 else:
233 else:
234 try:
234 try:
235 requirements = scmutil.readrequires(self.vfs, self.supported)
235 requirements = scmutil.readrequires(self.vfs, self.supported)
236 except IOError, inst:
236 except IOError, inst:
237 if inst.errno != errno.ENOENT:
237 if inst.errno != errno.ENOENT:
238 raise
238 raise
239 requirements = set()
239 requirements = set()
240
240
241 self.sharedpath = self.path
241 self.sharedpath = self.path
242 try:
242 try:
243 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
243 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
244 realpath=True)
244 realpath=True)
245 s = vfs.base
245 s = vfs.base
246 if not vfs.exists():
246 if not vfs.exists():
247 raise error.RepoError(
247 raise error.RepoError(
248 _('.hg/sharedpath points to nonexistent directory %s') % s)
248 _('.hg/sharedpath points to nonexistent directory %s') % s)
249 self.sharedpath = s
249 self.sharedpath = s
250 except IOError, inst:
250 except IOError, inst:
251 if inst.errno != errno.ENOENT:
251 if inst.errno != errno.ENOENT:
252 raise
252 raise
253
253
254 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
254 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
255 self.spath = self.store.path
255 self.spath = self.store.path
256 self.svfs = self.store.vfs
256 self.svfs = self.store.vfs
257 self.sopener = self.svfs
257 self.sopener = self.svfs
258 self.sjoin = self.store.join
258 self.sjoin = self.store.join
259 self.vfs.createmode = self.store.createmode
259 self.vfs.createmode = self.store.createmode
260 self._applyrequirements(requirements)
260 self._applyrequirements(requirements)
261 if create:
261 if create:
262 self._writerequirements()
262 self._writerequirements()
263
263
264
264
265 self._branchcaches = {}
265 self._branchcaches = {}
266 self.filterpats = {}
266 self.filterpats = {}
267 self._datafilters = {}
267 self._datafilters = {}
268 self._transref = self._lockref = self._wlockref = None
268 self._transref = self._lockref = self._wlockref = None
269
269
270 # A cache for various files under .hg/ that tracks file changes
271 # (used by the filecache decorator)
272 #
272 #
273 # Maps a property name to its util.filecacheentry
273 # Maps a property name to its util.filecacheentry
274 self._filecache = {}
274 self._filecache = {}
275
275
276 # hold sets of revisions to be filtered
277 # should be cleared when something might have changed the filter value:
278 # - new changesets,
279 # - phase change,
280 # - new obsolescence markers,
281 # - working directory parent change,
282 # - bookmark changes
283 self.filteredrevcache = {}
283 self.filteredrevcache = {}
284
284
285 def close(self):
285 def close(self):
286 pass
286 pass
287
287
288 def _restrictcapabilities(self, caps):
288 def _restrictcapabilities(self, caps):
289 # bundle2 is not ready for prime time, drop it unless explicitly
289 # bundle2 is not ready for prime time, drop it unless explicitly
290 # required by the tests (or some brave tester)
290 # required by the tests (or some brave tester)
291 if not self.ui.configbool('server', 'bundle2', False):
291 if not self.ui.configbool('server', 'bundle2', False):
292 caps = set(caps)
292 caps = set(caps)
293 caps.discard('bundle2')
293 caps.discard('bundle2')
294 return caps
294 return caps
295
295
296 def _applyrequirements(self, requirements):
296 def _applyrequirements(self, requirements):
297 self.requirements = requirements
297 self.requirements = requirements
298 self.sopener.options = dict((r, 1) for r in requirements
298 self.sopener.options = dict((r, 1) for r in requirements
299 if r in self.openerreqs)
299 if r in self.openerreqs)
300 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
300 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
301 if chunkcachesize is not None:
301 if chunkcachesize is not None:
302 self.sopener.options['chunkcachesize'] = chunkcachesize
302 self.sopener.options['chunkcachesize'] = chunkcachesize
303
303
304 def _writerequirements(self):
304 def _writerequirements(self):
305 reqfile = self.opener("requires", "w")
305 reqfile = self.opener("requires", "w")
306 for r in sorted(self.requirements):
306 for r in sorted(self.requirements):
307 reqfile.write("%s\n" % r)
307 reqfile.write("%s\n" % r)
308 reqfile.close()
308 reqfile.close()
309
309
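The requires file written above is plain text with one requirement name per line; scmutil.readrequires() (used later in __init__) is the real reader. Below is only a minimal sketch of the equivalent read, assuming an already-open repo object; the helper name is hypothetical.

def read_requires(repo):
    # mirror of _writerequirements(): one requirement name per line
    try:
        return set(repo.opener.read("requires").splitlines())
    except IOError:
        # a missing file simply means no recorded requirements
        return set()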
310 def _checknested(self, path):
310 def _checknested(self, path):
311 """Determine if path is a legal nested repository."""
311 """Determine if path is a legal nested repository."""
312 if not path.startswith(self.root):
312 if not path.startswith(self.root):
313 return False
313 return False
314 subpath = path[len(self.root) + 1:]
314 subpath = path[len(self.root) + 1:]
315 normsubpath = util.pconvert(subpath)
315 normsubpath = util.pconvert(subpath)
316
316
317 # XXX: Checking against the current working copy is wrong in
317 # XXX: Checking against the current working copy is wrong in
318 # the sense that it can reject things like
318 # the sense that it can reject things like
319 #
319 #
320 # $ hg cat -r 10 sub/x.txt
320 # $ hg cat -r 10 sub/x.txt
321 #
321 #
322 # if sub/ is no longer a subrepository in the working copy
322 # if sub/ is no longer a subrepository in the working copy
323 # parent revision.
323 # parent revision.
324 #
324 #
325 # However, it can of course also allow things that would have
325 # However, it can of course also allow things that would have
326 # been rejected before, such as the above cat command if sub/
326 # been rejected before, such as the above cat command if sub/
327 # is a subrepository now, but was a normal directory before.
327 # is a subrepository now, but was a normal directory before.
328 # The old path auditor would have rejected by mistake since it
328 # The old path auditor would have rejected by mistake since it
329 # panics when it sees sub/.hg/.
329 # panics when it sees sub/.hg/.
330 #
330 #
331 # All in all, checking against the working copy seems sensible
331 # All in all, checking against the working copy seems sensible
332 # since we want to prevent access to nested repositories on
332 # since we want to prevent access to nested repositories on
333 # the filesystem *now*.
333 # the filesystem *now*.
334 ctx = self[None]
334 ctx = self[None]
335 parts = util.splitpath(subpath)
335 parts = util.splitpath(subpath)
336 while parts:
336 while parts:
337 prefix = '/'.join(parts)
337 prefix = '/'.join(parts)
338 if prefix in ctx.substate:
338 if prefix in ctx.substate:
339 if prefix == normsubpath:
339 if prefix == normsubpath:
340 return True
340 return True
341 else:
341 else:
342 sub = ctx.sub(prefix)
342 sub = ctx.sub(prefix)
343 return sub.checknested(subpath[len(prefix) + 1:])
343 return sub.checknested(subpath[len(prefix) + 1:])
344 else:
344 else:
345 parts.pop()
345 parts.pop()
346 return False
346 return False
347
347
348 def peer(self):
348 def peer(self):
349 return localpeer(self) # not cached to avoid reference cycle
349 return localpeer(self) # not cached to avoid reference cycle
350
350
351 def unfiltered(self):
351 def unfiltered(self):
352 """Return unfiltered version of the repository
352 """Return unfiltered version of the repository
353
353
354 Intended to be overwritten by filtered repo."""
354 Intended to be overwritten by filtered repo."""
355 return self
355 return self
356
356
357 def filtered(self, name):
357 def filtered(self, name):
358 """Return a filtered version of a repository"""
358 """Return a filtered version of a repository"""
359 # build a new class with the mixin and the current class
359 # build a new class with the mixin and the current class
360 # (possibly subclass of the repo)
360 # (possibly subclass of the repo)
361 class proxycls(repoview.repoview, self.unfiltered().__class__):
361 class proxycls(repoview.repoview, self.unfiltered().__class__):
362 pass
362 pass
363 return proxycls(self, name)
363 return proxycls(self, name)
364
364
365 @repofilecache('bookmarks')
365 @repofilecache('bookmarks')
366 def _bookmarks(self):
366 def _bookmarks(self):
367 return bookmarks.bmstore(self)
367 return bookmarks.bmstore(self)
368
368
369 @repofilecache('bookmarks.current')
369 @repofilecache('bookmarks.current')
370 def _bookmarkcurrent(self):
370 def _bookmarkcurrent(self):
371 return bookmarks.readcurrent(self)
371 return bookmarks.readcurrent(self)
372
372
373 def bookmarkheads(self, bookmark):
373 def bookmarkheads(self, bookmark):
374 name = bookmark.split('@', 1)[0]
374 name = bookmark.split('@', 1)[0]
375 heads = []
375 heads = []
376 for mark, n in self._bookmarks.iteritems():
376 for mark, n in self._bookmarks.iteritems():
377 if mark.split('@', 1)[0] == name:
377 if mark.split('@', 1)[0] == name:
378 heads.append(n)
378 heads.append(n)
379 return heads
379 return heads
380
380
381 @storecache('phaseroots')
381 @storecache('phaseroots')
382 def _phasecache(self):
382 def _phasecache(self):
383 return phases.phasecache(self, self._phasedefaults)
383 return phases.phasecache(self, self._phasedefaults)
384
384
385 @storecache('obsstore')
385 @storecache('obsstore')
386 def obsstore(self):
386 def obsstore(self):
387 store = obsolete.obsstore(self.sopener)
387 store = obsolete.obsstore(self.sopener)
388 if store and not obsolete._enabled:
388 if store and not obsolete._enabled:
389 # message is rare enough to not be translated
389 # message is rare enough to not be translated
390 msg = 'obsolete feature not enabled but %i markers found!\n'
390 msg = 'obsolete feature not enabled but %i markers found!\n'
391 self.ui.warn(msg % len(list(store)))
391 self.ui.warn(msg % len(list(store)))
392 return store
392 return store
393
393
394 @storecache('00changelog.i')
394 @storecache('00changelog.i')
395 def changelog(self):
395 def changelog(self):
396 c = changelog.changelog(self.sopener)
396 c = changelog.changelog(self.sopener)
397 if 'HG_PENDING' in os.environ:
397 if 'HG_PENDING' in os.environ:
398 p = os.environ['HG_PENDING']
398 p = os.environ['HG_PENDING']
399 if p.startswith(self.root):
399 if p.startswith(self.root):
400 c.readpending('00changelog.i.a')
400 c.readpending('00changelog.i.a')
401 return c
401 return c
402
402
403 @storecache('00manifest.i')
403 @storecache('00manifest.i')
404 def manifest(self):
404 def manifest(self):
405 return manifest.manifest(self.sopener)
405 return manifest.manifest(self.sopener)
406
406
407 @repofilecache('dirstate')
407 @repofilecache('dirstate')
408 def dirstate(self):
408 def dirstate(self):
409 warned = [0]
409 warned = [0]
410 def validate(node):
410 def validate(node):
411 try:
411 try:
412 self.changelog.rev(node)
412 self.changelog.rev(node)
413 return node
413 return node
414 except error.LookupError:
414 except error.LookupError:
415 if not warned[0]:
415 if not warned[0]:
416 warned[0] = True
416 warned[0] = True
417 self.ui.warn(_("warning: ignoring unknown"
417 self.ui.warn(_("warning: ignoring unknown"
418 " working parent %s!\n") % short(node))
418 " working parent %s!\n") % short(node))
419 return nullid
419 return nullid
420
420
421 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
421 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
422
422
423 def __getitem__(self, changeid):
423 def __getitem__(self, changeid):
424 if changeid is None:
424 if changeid is None:
425 return context.workingctx(self)
425 return context.workingctx(self)
426 return context.changectx(self, changeid)
426 return context.changectx(self, changeid)
427
427
428 def __contains__(self, changeid):
428 def __contains__(self, changeid):
429 try:
429 try:
430 return bool(self.lookup(changeid))
430 return bool(self.lookup(changeid))
431 except error.RepoLookupError:
431 except error.RepoLookupError:
432 return False
432 return False
433
433
434 def __nonzero__(self):
434 def __nonzero__(self):
435 return True
435 return True
436
436
437 def __len__(self):
437 def __len__(self):
438 return len(self.changelog)
438 return len(self.changelog)
439
439
440 def __iter__(self):
440 def __iter__(self):
441 return iter(self.changelog)
441 return iter(self.changelog)
442
442
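The container protocol implemented above is how most callers obtain contexts: repo[None] is the working directory, repo['tip'] (or any changeid) a changectx, len() the number of revisions, and iteration yields revision numbers. A small illustrative sketch follows; the helper name is hypothetical.

def summarize(repo):
    wctx = repo[None]          # working directory context
    tipctx = repo['tip']       # changectx of the tip revision
    # __iter__ yields revision numbers; len(repo) is the changelog length
    return len(repo), tipctx.hex(), sorted(wctx.files())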
443 def revs(self, expr, *args):
443 def revs(self, expr, *args):
444 '''Return a list of revisions matching the given revset'''
444 '''Return a list of revisions matching the given revset'''
445 expr = revset.formatspec(expr, *args)
445 expr = revset.formatspec(expr, *args)
446 m = revset.match(None, expr)
446 m = revset.match(None, expr)
447 return m(self, revset.spanset(self))
447 return m(self, revset.spanset(self))
448
448
449 def set(self, expr, *args):
449 def set(self, expr, *args):
450 '''
450 '''
451 Yield a context for each matching revision, after doing arg
451 Yield a context for each matching revision, after doing arg
452 replacement via revset.formatspec
452 replacement via revset.formatspec
453 '''
453 '''
454 for r in self.revs(expr, *args):
454 for r in self.revs(expr, *args):
455 yield self[r]
455 yield self[r]
456
456
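revs() and set() are the usual Python-side entry points for revset queries: revs() returns revision numbers, set() yields contexts, and revset.formatspec quotes the %s arguments. A hedged usage sketch, where both the revset and the helper name are only examples:

def draft_summaries(repo, branch):
    # %s is filled in safely by revset.formatspec via set()/revs()
    for ctx in repo.set('branch(%s) and draft()', branch):
        yield ctx.rev(), ctx.description()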
457 def url(self):
457 def url(self):
458 return 'file:' + self.root
458 return 'file:' + self.root
459
459
460 def hook(self, name, throw=False, **args):
460 def hook(self, name, throw=False, **args):
461 return hook.hook(self.ui, self, name, throw, **args)
461 return hook.hook(self.ui, self, name, throw, **args)
462
462
463 @unfilteredmethod
463 @unfilteredmethod
464 def _tag(self, names, node, message, local, user, date, extra={}):
464 def _tag(self, names, node, message, local, user, date, extra={}):
465 if isinstance(names, str):
465 if isinstance(names, str):
466 names = (names,)
466 names = (names,)
467
467
468 branches = self.branchmap()
468 branches = self.branchmap()
469 for name in names:
469 for name in names:
470 self.hook('pretag', throw=True, node=hex(node), tag=name,
470 self.hook('pretag', throw=True, node=hex(node), tag=name,
471 local=local)
471 local=local)
472 if name in branches:
472 if name in branches:
473 self.ui.warn(_("warning: tag %s conflicts with existing"
473 self.ui.warn(_("warning: tag %s conflicts with existing"
474 " branch name\n") % name)
474 " branch name\n") % name)
475
475
476 def writetags(fp, names, munge, prevtags):
476 def writetags(fp, names, munge, prevtags):
477 fp.seek(0, 2)
477 fp.seek(0, 2)
478 if prevtags and prevtags[-1] != '\n':
478 if prevtags and prevtags[-1] != '\n':
479 fp.write('\n')
479 fp.write('\n')
480 for name in names:
480 for name in names:
481 m = munge and munge(name) or name
481 m = munge and munge(name) or name
482 if (self._tagscache.tagtypes and
482 if (self._tagscache.tagtypes and
483 name in self._tagscache.tagtypes):
483 name in self._tagscache.tagtypes):
484 old = self.tags().get(name, nullid)
484 old = self.tags().get(name, nullid)
485 fp.write('%s %s\n' % (hex(old), m))
485 fp.write('%s %s\n' % (hex(old), m))
486 fp.write('%s %s\n' % (hex(node), m))
486 fp.write('%s %s\n' % (hex(node), m))
487 fp.close()
487 fp.close()
488
488
489 prevtags = ''
489 prevtags = ''
490 if local:
490 if local:
491 try:
491 try:
492 fp = self.opener('localtags', 'r+')
492 fp = self.opener('localtags', 'r+')
493 except IOError:
493 except IOError:
494 fp = self.opener('localtags', 'a')
494 fp = self.opener('localtags', 'a')
495 else:
495 else:
496 prevtags = fp.read()
496 prevtags = fp.read()
497
497
498 # local tags are stored in the current charset
498 # local tags are stored in the current charset
499 writetags(fp, names, None, prevtags)
499 writetags(fp, names, None, prevtags)
500 for name in names:
500 for name in names:
501 self.hook('tag', node=hex(node), tag=name, local=local)
501 self.hook('tag', node=hex(node), tag=name, local=local)
502 return
502 return
503
503
504 try:
504 try:
505 fp = self.wfile('.hgtags', 'rb+')
505 fp = self.wfile('.hgtags', 'rb+')
506 except IOError, e:
506 except IOError, e:
507 if e.errno != errno.ENOENT:
507 if e.errno != errno.ENOENT:
508 raise
508 raise
509 fp = self.wfile('.hgtags', 'ab')
509 fp = self.wfile('.hgtags', 'ab')
510 else:
510 else:
511 prevtags = fp.read()
511 prevtags = fp.read()
512
512
513 # committed tags are stored in UTF-8
513 # committed tags are stored in UTF-8
514 writetags(fp, names, encoding.fromlocal, prevtags)
514 writetags(fp, names, encoding.fromlocal, prevtags)
515
515
516 fp.close()
516 fp.close()
517
517
518 self.invalidatecaches()
518 self.invalidatecaches()
519
519
520 if '.hgtags' not in self.dirstate:
520 if '.hgtags' not in self.dirstate:
521 self[None].add(['.hgtags'])
521 self[None].add(['.hgtags'])
522
522
523 m = matchmod.exact(self.root, '', ['.hgtags'])
523 m = matchmod.exact(self.root, '', ['.hgtags'])
524 tagnode = self.commit(message, user, date, extra=extra, match=m)
524 tagnode = self.commit(message, user, date, extra=extra, match=m)
525
525
526 for name in names:
526 for name in names:
527 self.hook('tag', node=hex(node), tag=name, local=local)
527 self.hook('tag', node=hex(node), tag=name, local=local)
528
528
529 return tagnode
529 return tagnode
530
530
531 def tag(self, names, node, message, local, user, date):
531 def tag(self, names, node, message, local, user, date):
532 '''tag a revision with one or more symbolic names.
532 '''tag a revision with one or more symbolic names.
533
533
534 names is a list of strings or, when adding a single tag, names may be a
534 names is a list of strings or, when adding a single tag, names may be a
535 string.
535 string.
536
536
537 if local is True, the tags are stored in a per-repository file.
537 if local is True, the tags are stored in a per-repository file.
538 otherwise, they are stored in the .hgtags file, and a new
538 otherwise, they are stored in the .hgtags file, and a new
539 changeset is committed with the change.
539 changeset is committed with the change.
540
540
541 keyword arguments:
541 keyword arguments:
542
542
543 local: whether to store tags in non-version-controlled file
543 local: whether to store tags in non-version-controlled file
544 (default False)
544 (default False)
545
545
546 message: commit message to use if committing
546 message: commit message to use if committing
547
547
548 user: name of user to use if committing
548 user: name of user to use if committing
549
549
550 date: date tuple to use if committing'''
550 date: date tuple to use if committing'''
551
551
552 if not local:
552 if not local:
553 for x in self.status()[:5]:
553 for x in self.status()[:5]:
554 if '.hgtags' in x:
554 if '.hgtags' in x:
555 raise util.Abort(_('working copy of .hgtags is changed '
555 raise util.Abort(_('working copy of .hgtags is changed '
556 '(please commit .hgtags manually)'))
556 '(please commit .hgtags manually)'))
557
557
558 self.tags() # instantiate the cache
558 self.tags() # instantiate the cache
559 self._tag(names, node, message, local, user, date)
559 self._tag(names, node, message, local, user, date)
560
560
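A usage sketch for tag(), following the keyword arguments documented above; the helper name and commit message are illustrative only, and mercurial.node.short is assumed to be importable as usual.

from mercurial.node import short

def tag_wd_parent(repo, name, user):
    # hypothetical helper: tag the current working-directory parent;
    # date=None lets the commit machinery pick the current time
    ctx = repo['.']
    message = 'Added tag %s for changeset %s' % (name, short(ctx.node()))
    return repo.tag(name, ctx.node(), message,
                    local=False, user=user, date=None)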
561 @filteredpropertycache
561 @filteredpropertycache
562 def _tagscache(self):
562 def _tagscache(self):
563 '''Returns a tagscache object that contains various tags related
563 '''Returns a tagscache object that contains various tags related
564 caches.'''
564 caches.'''
565
565
566 # This simplifies its cache management by having one decorated
566 # This simplifies its cache management by having one decorated
567 # function (this one) and the rest simply fetch things from it.
567 # function (this one) and the rest simply fetch things from it.
568 class tagscache(object):
568 class tagscache(object):
569 def __init__(self):
569 def __init__(self):
570 # These two define the set of tags for this repository. tags
570 # These two define the set of tags for this repository. tags
571 # maps tag name to node; tagtypes maps tag name to 'global' or
571 # maps tag name to node; tagtypes maps tag name to 'global' or
572 # 'local'. (Global tags are defined by .hgtags across all
572 # 'local'. (Global tags are defined by .hgtags across all
573 # heads, and local tags are defined in .hg/localtags.)
573 # heads, and local tags are defined in .hg/localtags.)
574 # They constitute the in-memory cache of tags.
574 # They constitute the in-memory cache of tags.
575 self.tags = self.tagtypes = None
575 self.tags = self.tagtypes = None
576
576
577 self.nodetagscache = self.tagslist = None
577 self.nodetagscache = self.tagslist = None
578
578
579 cache = tagscache()
579 cache = tagscache()
580 cache.tags, cache.tagtypes = self._findtags()
580 cache.tags, cache.tagtypes = self._findtags()
581
581
582 return cache
582 return cache
583
583
584 def tags(self):
584 def tags(self):
585 '''return a mapping of tag to node'''
585 '''return a mapping of tag to node'''
586 t = {}
586 t = {}
587 if self.changelog.filteredrevs:
587 if self.changelog.filteredrevs:
588 tags, tt = self._findtags()
588 tags, tt = self._findtags()
589 else:
589 else:
590 tags = self._tagscache.tags
590 tags = self._tagscache.tags
591 for k, v in tags.iteritems():
591 for k, v in tags.iteritems():
592 try:
592 try:
593 # ignore tags to unknown nodes
593 # ignore tags to unknown nodes
594 self.changelog.rev(v)
594 self.changelog.rev(v)
595 t[k] = v
595 t[k] = v
596 except (error.LookupError, ValueError):
596 except (error.LookupError, ValueError):
597 pass
597 pass
598 return t
598 return t
599
599
600 def _findtags(self):
600 def _findtags(self):
601 '''Do the hard work of finding tags. Return a pair of dicts
601 '''Do the hard work of finding tags. Return a pair of dicts
602 (tags, tagtypes) where tags maps tag name to node, and tagtypes
602 (tags, tagtypes) where tags maps tag name to node, and tagtypes
603 maps tag name to a string like \'global\' or \'local\'.
603 maps tag name to a string like \'global\' or \'local\'.
604 Subclasses or extensions are free to add their own tags, but
604 Subclasses or extensions are free to add their own tags, but
605 should be aware that the returned dicts will be retained for the
605 should be aware that the returned dicts will be retained for the
606 duration of the localrepo object.'''
606 duration of the localrepo object.'''
607
607
608 # XXX what tagtype should subclasses/extensions use? Currently
608 # XXX what tagtype should subclasses/extensions use? Currently
609 # mq and bookmarks add tags, but do not set the tagtype at all.
609 # mq and bookmarks add tags, but do not set the tagtype at all.
610 # Should each extension invent its own tag type? Should there
610 # Should each extension invent its own tag type? Should there
611 # be one tagtype for all such "virtual" tags? Or is the status
611 # be one tagtype for all such "virtual" tags? Or is the status
612 # quo fine?
612 # quo fine?
613
613
614 alltags = {} # map tag name to (node, hist)
614 alltags = {} # map tag name to (node, hist)
615 tagtypes = {}
615 tagtypes = {}
616
616
617 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
617 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
618 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
618 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
619
619
620 # Build the return dicts. Have to re-encode tag names because
620 # Build the return dicts. Have to re-encode tag names because
621 # the tags module always uses UTF-8 (in order not to lose info
621 # the tags module always uses UTF-8 (in order not to lose info
622 # writing to the cache), but the rest of Mercurial wants them in
622 # writing to the cache), but the rest of Mercurial wants them in
623 # local encoding.
623 # local encoding.
624 tags = {}
624 tags = {}
625 for (name, (node, hist)) in alltags.iteritems():
625 for (name, (node, hist)) in alltags.iteritems():
626 if node != nullid:
626 if node != nullid:
627 tags[encoding.tolocal(name)] = node
627 tags[encoding.tolocal(name)] = node
628 tags['tip'] = self.changelog.tip()
628 tags['tip'] = self.changelog.tip()
629 tagtypes = dict([(encoding.tolocal(name), value)
629 tagtypes = dict([(encoding.tolocal(name), value)
630 for (name, value) in tagtypes.iteritems()])
630 for (name, value) in tagtypes.iteritems()])
631 return (tags, tagtypes)
631 return (tags, tagtypes)
632
632
633 def tagtype(self, tagname):
633 def tagtype(self, tagname):
634 '''
634 '''
635 return the type of the given tag. result can be:
635 return the type of the given tag. result can be:
636
636
637 'local' : a local tag
637 'local' : a local tag
638 'global' : a global tag
638 'global' : a global tag
639 None : tag does not exist
639 None : tag does not exist
640 '''
640 '''
641
641
642 return self._tagscache.tagtypes.get(tagname)
642 return self._tagscache.tagtypes.get(tagname)
643
643
644 def tagslist(self):
644 def tagslist(self):
645 '''return a list of tags ordered by revision'''
645 '''return a list of tags ordered by revision'''
646 if not self._tagscache.tagslist:
646 if not self._tagscache.tagslist:
647 l = []
647 l = []
648 for t, n in self.tags().iteritems():
648 for t, n in self.tags().iteritems():
649 r = self.changelog.rev(n)
649 r = self.changelog.rev(n)
650 l.append((r, t, n))
650 l.append((r, t, n))
651 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
651 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
652
652
653 return self._tagscache.tagslist
653 return self._tagscache.tagslist
654
654
655 def nodetags(self, node):
655 def nodetags(self, node):
656 '''return the tags associated with a node'''
656 '''return the tags associated with a node'''
657 if not self._tagscache.nodetagscache:
657 if not self._tagscache.nodetagscache:
658 nodetagscache = {}
658 nodetagscache = {}
659 for t, n in self._tagscache.tags.iteritems():
659 for t, n in self._tagscache.tags.iteritems():
660 nodetagscache.setdefault(n, []).append(t)
660 nodetagscache.setdefault(n, []).append(t)
661 for tags in nodetagscache.itervalues():
661 for tags in nodetagscache.itervalues():
662 tags.sort()
662 tags.sort()
663 self._tagscache.nodetagscache = nodetagscache
663 self._tagscache.nodetagscache = nodetagscache
664 return self._tagscache.nodetagscache.get(node, [])
664 return self._tagscache.nodetagscache.get(node, [])
665
665
666 def nodebookmarks(self, node):
666 def nodebookmarks(self, node):
667 marks = []
667 marks = []
668 for bookmark, n in self._bookmarks.iteritems():
668 for bookmark, n in self._bookmarks.iteritems():
669 if n == node:
669 if n == node:
670 marks.append(bookmark)
670 marks.append(bookmark)
671 return sorted(marks)
671 return sorted(marks)
672
672
673 def branchmap(self):
673 def branchmap(self):
674 '''returns a dictionary {branch: [branchheads]} with branchheads
674 '''returns a dictionary {branch: [branchheads]} with branchheads
675 ordered by increasing revision number'''
675 ordered by increasing revision number'''
676 branchmap.updatecache(self)
676 branchmap.updatecache(self)
677 return self._branchcaches[self.filtername]
677 return self._branchcaches[self.filtername]
678
678
679 def branchtip(self, branch):
679 def branchtip(self, branch):
680 '''return the tip node for a given branch'''
680 '''return the tip node for a given branch'''
681 try:
681 try:
682 return self.branchmap().branchtip(branch)
682 return self.branchmap().branchtip(branch)
683 except KeyError:
683 except KeyError:
684 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
684 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
685
685
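For illustration, a small sketch built on the two methods above; it assumes branchmap() can be indexed like a dict, per its docstring, and the helper name is made up.

def branch_heads(repo, branch):
    # heads come back ordered by increasing revision number
    try:
        return repo.branchmap()[branch]
    except KeyError:
        return []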
686 def lookup(self, key):
686 def lookup(self, key):
687 return self[key].node()
687 return self[key].node()
688
688
689 def lookupbranch(self, key, remote=None):
689 def lookupbranch(self, key, remote=None):
690 repo = remote or self
690 repo = remote or self
691 if key in repo.branchmap():
691 if key in repo.branchmap():
692 return key
692 return key
693
693
694 repo = (remote and remote.local()) and remote or self
694 repo = (remote and remote.local()) and remote or self
695 return repo[key].branch()
695 return repo[key].branch()
696
696
697 def known(self, nodes):
697 def known(self, nodes):
698 nm = self.changelog.nodemap
698 nm = self.changelog.nodemap
699 pc = self._phasecache
699 pc = self._phasecache
700 result = []
700 result = []
701 for n in nodes:
701 for n in nodes:
702 r = nm.get(n)
702 r = nm.get(n)
703 resp = not (r is None or pc.phase(self, r) >= phases.secret)
703 resp = not (r is None or pc.phase(self, r) >= phases.secret)
704 result.append(resp)
704 result.append(resp)
705 return result
705 return result
706
706
707 def local(self):
707 def local(self):
708 return self
708 return self
709
709
710 def cancopy(self):
710 def cancopy(self):
711 # so statichttprepo's override of local() works
711 # so statichttprepo's override of local() works
712 if not self.local():
712 if not self.local():
713 return False
713 return False
714 if not self.ui.configbool('phases', 'publish', True):
714 if not self.ui.configbool('phases', 'publish', True):
715 return True
715 return True
716 # if publishing we can't copy if there is filtered content
716 # if publishing we can't copy if there is filtered content
717 return not self.filtered('visible').changelog.filteredrevs
717 return not self.filtered('visible').changelog.filteredrevs
718
718
719 def join(self, f):
719 def join(self, f):
720 return os.path.join(self.path, f)
720 return os.path.join(self.path, f)
721
721
722 def wjoin(self, f):
722 def wjoin(self, f):
723 return os.path.join(self.root, f)
723 return os.path.join(self.root, f)
724
724
725 def file(self, f):
725 def file(self, f):
726 if f[0] == '/':
726 if f[0] == '/':
727 f = f[1:]
727 f = f[1:]
728 return filelog.filelog(self.sopener, f)
728 return filelog.filelog(self.sopener, f)
729
729
730 def changectx(self, changeid):
730 def changectx(self, changeid):
731 return self[changeid]
731 return self[changeid]
732
732
733 def parents(self, changeid=None):
733 def parents(self, changeid=None):
734 '''get list of changectxs for parents of changeid'''
734 '''get list of changectxs for parents of changeid'''
735 return self[changeid].parents()
735 return self[changeid].parents()
736
736
737 def setparents(self, p1, p2=nullid):
737 def setparents(self, p1, p2=nullid):
738 copies = self.dirstate.setparents(p1, p2)
738 copies = self.dirstate.setparents(p1, p2)
739 pctx = self[p1]
739 pctx = self[p1]
740 if copies:
740 if copies:
741 # Adjust copy records; the dirstate cannot do it, as it
742 # requires access to the parents' manifests. Preserve them
743 # only for entries added to the first parent.
744 for f in copies:
744 for f in copies:
745 if f not in pctx and copies[f] in pctx:
745 if f not in pctx and copies[f] in pctx:
746 self.dirstate.copy(copies[f], f)
746 self.dirstate.copy(copies[f], f)
747 if p2 == nullid:
747 if p2 == nullid:
748 for f, s in sorted(self.dirstate.copies().items()):
748 for f, s in sorted(self.dirstate.copies().items()):
749 if f not in pctx and s not in pctx:
749 if f not in pctx and s not in pctx:
750 self.dirstate.copy(None, f)
750 self.dirstate.copy(None, f)
751
751
752 def filectx(self, path, changeid=None, fileid=None):
752 def filectx(self, path, changeid=None, fileid=None):
753 """changeid can be a changeset revision, node, or tag.
753 """changeid can be a changeset revision, node, or tag.
754 fileid can be a file revision or node."""
754 fileid can be a file revision or node."""
755 return context.filectx(self, path, changeid, fileid)
755 return context.filectx(self, path, changeid, fileid)
756
756
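A brief sketch of filectx() as an accessor, per the docstring above; the helper name is hypothetical.

def read_at(repo, path, changeid):
    # changeid may be a revision number, node, or tag (see docstring above)
    return repo.filectx(path, changeid=changeid).data()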
757 def getcwd(self):
757 def getcwd(self):
758 return self.dirstate.getcwd()
758 return self.dirstate.getcwd()
759
759
760 def pathto(self, f, cwd=None):
760 def pathto(self, f, cwd=None):
761 return self.dirstate.pathto(f, cwd)
761 return self.dirstate.pathto(f, cwd)
762
762
763 def wfile(self, f, mode='r'):
763 def wfile(self, f, mode='r'):
764 return self.wopener(f, mode)
764 return self.wopener(f, mode)
765
765
766 def _link(self, f):
766 def _link(self, f):
767 return self.wvfs.islink(f)
767 return self.wvfs.islink(f)
768
768
769 def _loadfilter(self, filter):
769 def _loadfilter(self, filter):
770 if filter not in self.filterpats:
770 if filter not in self.filterpats:
771 l = []
771 l = []
772 for pat, cmd in self.ui.configitems(filter):
772 for pat, cmd in self.ui.configitems(filter):
773 if cmd == '!':
773 if cmd == '!':
774 continue
774 continue
775 mf = matchmod.match(self.root, '', [pat])
775 mf = matchmod.match(self.root, '', [pat])
776 fn = None
776 fn = None
777 params = cmd
777 params = cmd
778 for name, filterfn in self._datafilters.iteritems():
778 for name, filterfn in self._datafilters.iteritems():
779 if cmd.startswith(name):
779 if cmd.startswith(name):
780 fn = filterfn
780 fn = filterfn
781 params = cmd[len(name):].lstrip()
781 params = cmd[len(name):].lstrip()
782 break
782 break
783 if not fn:
783 if not fn:
784 fn = lambda s, c, **kwargs: util.filter(s, c)
784 fn = lambda s, c, **kwargs: util.filter(s, c)
785 # Wrap old filters not supporting keyword arguments
785 # Wrap old filters not supporting keyword arguments
786 if not inspect.getargspec(fn)[2]:
786 if not inspect.getargspec(fn)[2]:
787 oldfn = fn
787 oldfn = fn
788 fn = lambda s, c, **kwargs: oldfn(s, c)
788 fn = lambda s, c, **kwargs: oldfn(s, c)
789 l.append((mf, fn, params))
789 l.append((mf, fn, params))
790 self.filterpats[filter] = l
790 self.filterpats[filter] = l
791 return self.filterpats[filter]
791 return self.filterpats[filter]
792
792
793 def _filter(self, filterpats, filename, data):
793 def _filter(self, filterpats, filename, data):
794 for mf, fn, cmd in filterpats:
794 for mf, fn, cmd in filterpats:
795 if mf(filename):
795 if mf(filename):
796 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
796 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
797 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
797 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
798 break
798 break
799
799
800 return data
800 return data
801
801
802 @unfilteredpropertycache
802 @unfilteredpropertycache
803 def _encodefilterpats(self):
803 def _encodefilterpats(self):
804 return self._loadfilter('encode')
804 return self._loadfilter('encode')
805
805
806 @unfilteredpropertycache
806 @unfilteredpropertycache
807 def _decodefilterpats(self):
807 def _decodefilterpats(self):
808 return self._loadfilter('decode')
808 return self._loadfilter('decode')
809
809
810 def adddatafilter(self, name, filter):
810 def adddatafilter(self, name, filter):
811 self._datafilters[name] = filter
811 self._datafilters[name] = filter
812
812
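The encode/decode filter machinery above also accepts extension-registered filters via adddatafilter(); the registered name is matched as a prefix of the configured command in _loadfilter(). A purely hypothetical example (the 'upper:' filter does not exist in Mercurial):

def upper(s, params, **kwargs):
    # hypothetical filter: receives the file data plus the tail of the
    # configured command; ui/repo/filename arrive as keyword arguments
    return s.upper()

def install_upper_filter(repo):
    # registered names are matched as prefixes of the [encode]/[decode]
    # commands, so an hgrc entry such as "**.txt = upper:" would use it
    repo.adddatafilter('upper:', upper)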
813 def wread(self, filename):
813 def wread(self, filename):
814 if self._link(filename):
814 if self._link(filename):
815 data = self.wvfs.readlink(filename)
815 data = self.wvfs.readlink(filename)
816 else:
816 else:
817 data = self.wopener.read(filename)
817 data = self.wopener.read(filename)
818 return self._filter(self._encodefilterpats, filename, data)
818 return self._filter(self._encodefilterpats, filename, data)
819
819
820 def wwrite(self, filename, data, flags):
820 def wwrite(self, filename, data, flags):
821 data = self._filter(self._decodefilterpats, filename, data)
821 data = self._filter(self._decodefilterpats, filename, data)
822 if 'l' in flags:
822 if 'l' in flags:
823 self.wopener.symlink(data, filename)
823 self.wopener.symlink(data, filename)
824 else:
824 else:
825 self.wopener.write(filename, data)
825 self.wopener.write(filename, data)
826 if 'x' in flags:
826 if 'x' in flags:
827 self.wvfs.setflags(filename, False, True)
827 self.wvfs.setflags(filename, False, True)
828
828
829 def wwritedata(self, filename, data):
829 def wwritedata(self, filename, data):
830 return self._filter(self._decodefilterpats, filename, data)
830 return self._filter(self._decodefilterpats, filename, data)
831
831
832 def transaction(self, desc, report=None):
832 def transaction(self, desc, report=None):
833 tr = self._transref and self._transref() or None
833 tr = self._transref and self._transref() or None
834 if tr and tr.running():
834 if tr and tr.running():
835 return tr.nest()
835 return tr.nest()
836
836
837 # abort here if the journal already exists
837 # abort here if the journal already exists
838 if self.svfs.exists("journal"):
838 if self.svfs.exists("journal"):
839 raise error.RepoError(
839 raise error.RepoError(
840 _("abandoned transaction found - run hg recover"))
840 _("abandoned transaction found - run hg recover"))
841
841
842 def onclose():
842 def onclose():
843 self.store.write(tr)
843 self.store.write(tr)
844
844
845 self._writejournal(desc)
845 self._writejournal(desc)
846 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
846 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
847 rp = report and report or self.ui.warn
847 rp = report and report or self.ui.warn
848 tr = transaction.transaction(rp, self.sopener,
848 tr = transaction.transaction(rp, self.sopener,
849 "journal",
849 "journal",
850 aftertrans(renames),
850 aftertrans(renames),
851 self.store.createmode,
851 self.store.createmode,
852 onclose)
852 onclose)
853 self._transref = weakref.ref(tr)
853 self._transref = weakref.ref(tr)
854 return tr
854 return tr
855
855
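A sketch of the conventional calling pattern around transaction(): take the store lock, run the writes, close() on success, and release() unconditionally so an interrupted run leaves a journal for recover(). Helper names here are hypothetical.

def run_in_transaction(repo, desc, writes):
    lock = repo.lock()
    try:
        tr = None
        try:
            tr = repo.transaction(desc)
            writes(tr)
            tr.close()        # commit the journal
        finally:
            if tr:
                tr.release()  # roll back if close() was never reached
    finally:
        lock.release()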
856 def _journalfiles(self):
856 def _journalfiles(self):
857 return ((self.svfs, 'journal'),
857 return ((self.svfs, 'journal'),
858 (self.vfs, 'journal.dirstate'),
858 (self.vfs, 'journal.dirstate'),
859 (self.vfs, 'journal.branch'),
859 (self.vfs, 'journal.branch'),
860 (self.vfs, 'journal.desc'),
860 (self.vfs, 'journal.desc'),
861 (self.vfs, 'journal.bookmarks'),
861 (self.vfs, 'journal.bookmarks'),
862 (self.svfs, 'journal.phaseroots'))
862 (self.svfs, 'journal.phaseroots'))
863
863
864 def undofiles(self):
864 def undofiles(self):
865 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
865 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
866
866
867 def _writejournal(self, desc):
867 def _writejournal(self, desc):
868 self.opener.write("journal.dirstate",
868 self.opener.write("journal.dirstate",
869 self.opener.tryread("dirstate"))
869 self.opener.tryread("dirstate"))
870 self.opener.write("journal.branch",
870 self.opener.write("journal.branch",
871 encoding.fromlocal(self.dirstate.branch()))
871 encoding.fromlocal(self.dirstate.branch()))
872 self.opener.write("journal.desc",
872 self.opener.write("journal.desc",
873 "%d\n%s\n" % (len(self), desc))
873 "%d\n%s\n" % (len(self), desc))
874 self.opener.write("journal.bookmarks",
874 self.opener.write("journal.bookmarks",
875 self.opener.tryread("bookmarks"))
875 self.opener.tryread("bookmarks"))
876 self.sopener.write("journal.phaseroots",
876 self.sopener.write("journal.phaseroots",
877 self.sopener.tryread("phaseroots"))
877 self.sopener.tryread("phaseroots"))
878
878
879 def recover(self):
879 def recover(self):
880 lock = self.lock()
880 lock = self.lock()
881 try:
881 try:
882 if self.svfs.exists("journal"):
882 if self.svfs.exists("journal"):
883 self.ui.status(_("rolling back interrupted transaction\n"))
883 self.ui.status(_("rolling back interrupted transaction\n"))
884 transaction.rollback(self.sopener, "journal",
884 transaction.rollback(self.sopener, "journal",
885 self.ui.warn)
885 self.ui.warn)
886 self.invalidate()
886 self.invalidate()
887 return True
887 return True
888 else:
888 else:
889 self.ui.warn(_("no interrupted transaction available\n"))
889 self.ui.warn(_("no interrupted transaction available\n"))
890 return False
890 return False
891 finally:
891 finally:
892 lock.release()
892 lock.release()
893
893
894 def rollback(self, dryrun=False, force=False):
894 def rollback(self, dryrun=False, force=False):
895 wlock = lock = None
895 wlock = lock = None
896 try:
896 try:
897 wlock = self.wlock()
897 wlock = self.wlock()
898 lock = self.lock()
898 lock = self.lock()
899 if self.svfs.exists("undo"):
899 if self.svfs.exists("undo"):
900 return self._rollback(dryrun, force)
900 return self._rollback(dryrun, force)
901 else:
901 else:
902 self.ui.warn(_("no rollback information available\n"))
902 self.ui.warn(_("no rollback information available\n"))
903 return 1
903 return 1
904 finally:
904 finally:
905 release(lock, wlock)
905 release(lock, wlock)
906
906
907 @unfilteredmethod # Until we get smarter cache management
907 @unfilteredmethod # Until we get smarter cache management
908 def _rollback(self, dryrun, force):
908 def _rollback(self, dryrun, force):
909 ui = self.ui
909 ui = self.ui
910 try:
910 try:
911 args = self.opener.read('undo.desc').splitlines()
911 args = self.opener.read('undo.desc').splitlines()
912 (oldlen, desc, detail) = (int(args[0]), args[1], None)
912 (oldlen, desc, detail) = (int(args[0]), args[1], None)
913 if len(args) >= 3:
913 if len(args) >= 3:
914 detail = args[2]
914 detail = args[2]
915 oldtip = oldlen - 1
915 oldtip = oldlen - 1
916
916
917 if detail and ui.verbose:
917 if detail and ui.verbose:
918 msg = (_('repository tip rolled back to revision %s'
918 msg = (_('repository tip rolled back to revision %s'
919 ' (undo %s: %s)\n')
919 ' (undo %s: %s)\n')
920 % (oldtip, desc, detail))
920 % (oldtip, desc, detail))
921 else:
921 else:
922 msg = (_('repository tip rolled back to revision %s'
922 msg = (_('repository tip rolled back to revision %s'
923 ' (undo %s)\n')
923 ' (undo %s)\n')
924 % (oldtip, desc))
924 % (oldtip, desc))
925 except IOError:
925 except IOError:
926 msg = _('rolling back unknown transaction\n')
926 msg = _('rolling back unknown transaction\n')
927 desc = None
927 desc = None
928
928
929 if not force and self['.'] != self['tip'] and desc == 'commit':
929 if not force and self['.'] != self['tip'] and desc == 'commit':
930 raise util.Abort(
930 raise util.Abort(
931 _('rollback of last commit while not checked out '
931 _('rollback of last commit while not checked out '
932 'may lose data'), hint=_('use -f to force'))
932 'may lose data'), hint=_('use -f to force'))
933
933
934 ui.status(msg)
934 ui.status(msg)
935 if dryrun:
935 if dryrun:
936 return 0
936 return 0
937
937
938 parents = self.dirstate.parents()
938 parents = self.dirstate.parents()
939 self.destroying()
939 self.destroying()
940 transaction.rollback(self.sopener, 'undo', ui.warn)
940 transaction.rollback(self.sopener, 'undo', ui.warn)
941 if self.vfs.exists('undo.bookmarks'):
941 if self.vfs.exists('undo.bookmarks'):
942 self.vfs.rename('undo.bookmarks', 'bookmarks')
942 self.vfs.rename('undo.bookmarks', 'bookmarks')
943 if self.svfs.exists('undo.phaseroots'):
943 if self.svfs.exists('undo.phaseroots'):
944 self.svfs.rename('undo.phaseroots', 'phaseroots')
944 self.svfs.rename('undo.phaseroots', 'phaseroots')
945 self.invalidate()
945 self.invalidate()
946
946
947 parentgone = (parents[0] not in self.changelog.nodemap or
947 parentgone = (parents[0] not in self.changelog.nodemap or
948 parents[1] not in self.changelog.nodemap)
948 parents[1] not in self.changelog.nodemap)
949 if parentgone:
949 if parentgone:
950 self.vfs.rename('undo.dirstate', 'dirstate')
950 self.vfs.rename('undo.dirstate', 'dirstate')
951 try:
951 try:
952 branch = self.opener.read('undo.branch')
952 branch = self.opener.read('undo.branch')
953 self.dirstate.setbranch(encoding.tolocal(branch))
953 self.dirstate.setbranch(encoding.tolocal(branch))
954 except IOError:
954 except IOError:
955 ui.warn(_('named branch could not be reset: '
955 ui.warn(_('named branch could not be reset: '
956 'current branch is still \'%s\'\n')
956 'current branch is still \'%s\'\n')
957 % self.dirstate.branch())
957 % self.dirstate.branch())
958
958
959 self.dirstate.invalidate()
959 self.dirstate.invalidate()
960 parents = tuple([p.rev() for p in self.parents()])
960 parents = tuple([p.rev() for p in self.parents()])
961 if len(parents) > 1:
961 if len(parents) > 1:
962 ui.status(_('working directory now based on '
962 ui.status(_('working directory now based on '
963 'revisions %d and %d\n') % parents)
963 'revisions %d and %d\n') % parents)
964 else:
964 else:
965 ui.status(_('working directory now based on '
965 ui.status(_('working directory now based on '
966 'revision %d\n') % parents)
966 'revision %d\n') % parents)
967 # TODO: if we know which new heads may result from this rollback, pass
967 # TODO: if we know which new heads may result from this rollback, pass
968 # them to destroy(), which will prevent the branchhead cache from being
968 # them to destroy(), which will prevent the branchhead cache from being
969 # invalidated.
969 # invalidated.
970 self.destroyed()
970 self.destroyed()
971 return 0
971 return 0
972
972
973 def invalidatecaches(self):
973 def invalidatecaches(self):
974
974
975 if '_tagscache' in vars(self):
975 if '_tagscache' in vars(self):
976 # can't use delattr on proxy
976 # can't use delattr on proxy
977 del self.__dict__['_tagscache']
977 del self.__dict__['_tagscache']
978
978
979 self.unfiltered()._branchcaches.clear()
979 self.unfiltered()._branchcaches.clear()
980 self.invalidatevolatilesets()
980 self.invalidatevolatilesets()
981
981
982 def invalidatevolatilesets(self):
982 def invalidatevolatilesets(self):
983 self.filteredrevcache.clear()
983 self.filteredrevcache.clear()
984 obsolete.clearobscaches(self)
984 obsolete.clearobscaches(self)
985
985
986 def invalidatedirstate(self):
986 def invalidatedirstate(self):
987 '''Invalidates the dirstate, causing the next call to dirstate
988 to check if it was modified since the last time it was read,
989 rereading it if it has.
990
991 This is different from dirstate.invalidate() in that it doesn't
992 always reread the dirstate. Use dirstate.invalidate() if you want
993 to explicitly read the dirstate again (i.e. restoring it to a
994 previous known good state).'''
995 if hasunfilteredcache(self, 'dirstate'):
995 if hasunfilteredcache(self, 'dirstate'):
996 for k in self.dirstate._filecache:
996 for k in self.dirstate._filecache:
997 try:
997 try:
998 delattr(self.dirstate, k)
998 delattr(self.dirstate, k)
999 except AttributeError:
999 except AttributeError:
1000 pass
1000 pass
1001 delattr(self.unfiltered(), 'dirstate')
1001 delattr(self.unfiltered(), 'dirstate')
1002
1002
1003 def invalidate(self):
1003 def invalidate(self):
1004 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1004 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1005 for k in self._filecache:
1005 for k in self._filecache:
1006 # dirstate is invalidated separately in invalidatedirstate()
1006 # dirstate is invalidated separately in invalidatedirstate()
1007 if k == 'dirstate':
1007 if k == 'dirstate':
1008 continue
1008 continue
1009
1009
1010 try:
1010 try:
1011 delattr(unfiltered, k)
1011 delattr(unfiltered, k)
1012 except AttributeError:
1012 except AttributeError:
1013 pass
1013 pass
1014 self.invalidatecaches()
1014 self.invalidatecaches()
1015 self.store.invalidatecaches()
1015 self.store.invalidatecaches()
1016
1016
1017 def invalidateall(self):
1017 def invalidateall(self):
1018 '''Fully invalidates both store and non-store parts, causing the
1018 '''Fully invalidates both store and non-store parts, causing the
1019 subsequent operation to reread any outside changes.'''
1019 subsequent operation to reread any outside changes.'''
1020 # extensions should hook this to invalidate their caches
1021 self.invalidate()
1021 self.invalidate()
1022 self.invalidatedirstate()
1022 self.invalidatedirstate()
1023
1023
1024 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1024 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1025 try:
1025 try:
1026 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1026 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1027 except error.LockHeld, inst:
1027 except error.LockHeld, inst:
1028 if not wait:
1028 if not wait:
1029 raise
1029 raise
1030 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1030 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1031 (desc, inst.locker))
1031 (desc, inst.locker))
1032 # default to 600 seconds timeout
1032 # default to 600 seconds timeout
1033 l = lockmod.lock(vfs, lockname,
1033 l = lockmod.lock(vfs, lockname,
1034 int(self.ui.config("ui", "timeout", "600")),
1034 int(self.ui.config("ui", "timeout", "600")),
1035 releasefn, desc=desc)
1035 releasefn, desc=desc)
1036 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1036 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1037 if acquirefn:
1037 if acquirefn:
1038 acquirefn()
1038 acquirefn()
1039 return l
1039 return l
1040
1040
1041 def _afterlock(self, callback):
1041 def _afterlock(self, callback):
1042 """add a callback to the current repository lock.
1042 """add a callback to the current repository lock.
1043
1043
1044 The callback will be executed on lock release."""
1044 The callback will be executed on lock release."""
1045 l = self._lockref and self._lockref()
1045 l = self._lockref and self._lockref()
1046 if l:
1046 if l:
1047 l.postrelease.append(callback)
1047 l.postrelease.append(callback)
1048 else:
1048 else:
1049 callback()
1049 callback()
1050
1050
1051 def lock(self, wait=True):
1051 def lock(self, wait=True):
1052 '''Lock the repository store (.hg/store) and return a weak reference
1053 to the lock. Use this before modifying the store (e.g. committing or
1054 stripping). If you are opening a transaction, get a lock as well.'''
1055 l = self._lockref and self._lockref()
1055 l = self._lockref and self._lockref()
1056 if l is not None and l.held:
1056 if l is not None and l.held:
1057 l.lock()
1057 l.lock()
1058 return l
1058 return l
1059
1059
1060 def unlock():
1060 def unlock():
1061 if hasunfilteredcache(self, '_phasecache'):
1061 if hasunfilteredcache(self, '_phasecache'):
1062 self._phasecache.write()
1062 self._phasecache.write()
1063 for k, ce in self._filecache.items():
1063 for k, ce in self._filecache.items():
1064 if k == 'dirstate' or k not in self.__dict__:
1064 if k == 'dirstate' or k not in self.__dict__:
1065 continue
1065 continue
1066 ce.refresh()
1066 ce.refresh()
1067
1067
1068 l = self._lock(self.svfs, "lock", wait, unlock,
1068 l = self._lock(self.svfs, "lock", wait, unlock,
1069 self.invalidate, _('repository %s') % self.origroot)
1069 self.invalidate, _('repository %s') % self.origroot)
1070 self._lockref = weakref.ref(l)
1070 self._lockref = weakref.ref(l)
1071 return l
1071 return l
1072
1072
1073 def wlock(self, wait=True):
1073 def wlock(self, wait=True):
1074 '''Lock the non-store parts of the repository (everything under
1074 '''Lock the non-store parts of the repository (everything under
1075 .hg except .hg/store) and return a weak reference to the lock.
1075 .hg except .hg/store) and return a weak reference to the lock.
1076 Use this before modifying files in .hg.'''
1076 Use this before modifying files in .hg.'''
1077 l = self._wlockref and self._wlockref()
1077 l = self._wlockref and self._wlockref()
1078 if l is not None and l.held:
1078 if l is not None and l.held:
1079 l.lock()
1079 l.lock()
1080 return l
1080 return l
1081
1081
1082 def unlock():
1082 def unlock():
1083 self.dirstate.write()
1083 self.dirstate.write()
1084 self._filecache['dirstate'].refresh()
1084 self._filecache['dirstate'].refresh()
1085
1085
1086 l = self._lock(self.vfs, "wlock", wait, unlock,
1086 l = self._lock(self.vfs, "wlock", wait, unlock,
1087 self.invalidatedirstate, _('working directory of %s') %
1087 self.invalidatedirstate, _('working directory of %s') %
1088 self.origroot)
1088 self.origroot)
1089 self._wlockref = weakref.ref(l)
1089 self._wlockref = weakref.ref(l)
1090 return l
1090 return l
1091
1091
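    # Illustrative sketch (not part of the original file): callers typically
    # pair wlock()/lock() with try/finally so the weakly-referenced locks are
    # always released, mirroring how commit() below uses wlock(). The names
    # 'repo' and 'do_store_modification' are hypothetical placeholders.
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         do_store_modification(repo)
    #     finally:
    #         if lock is not None:
    #             lock.release()
    #         if wlock is not None:
    #             wlock.release()
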
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestors(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

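    # Illustrative note (not in the original file): when _filecommit records a
    # rename, the new filelog revision carries the copy source in its metadata,
    # roughly meta == {"copy": "foo", "copyrev": "<40-hex node of foo>"}, and
    # fparent1 is set to nullid so readers know to consult that copy data.
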
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in changes[:3]:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

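    # Illustrative sketch (not part of the original file): an extension or a
    # command can create a changeset programmatically through this API; commit()
    # returns None when there is nothing to commit. The repository object
    # 'repo' and the message are hypothetical placeholders.
    #
    #     node = repo.commit(text="example message",
    #                        user="Example <example@example.org>")
    #     if node is None:
    #         repo.ui.status("nothing changed\n")
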
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter parent
                # changesets: if a parent already has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

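    # Illustrative sketch (not part of the original file): status() returns a
    # seven-element sequence whose order matches the assignment to 'r' above.
    # 'repo' is a hypothetical localrepository instance.
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(unknown=True, clean=True)
    #     for f in modified:
    #         repo.ui.write("M %s\n" % f)
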
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

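    # Illustrative note (not in the original file): for each (top, bottom)
    # pair, between() walks first parents from top towards bottom and records
    # only the nodes at exponentially growing distances (1, 2, 4, 8, ...),
    # since f doubles each time a node is appended.
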
    def pull(self, remote, heads=None, force=False):
        return exchange.pull(self, remote, heads, force)

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

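    # Illustrative sketch (not part of the original file): an extension can
    # hook into checkpush() by subclassing the repository class, a common
    # reposetup pattern. The class name 'vetorepo' and the abort condition
    # are hypothetical.
    #
    #     class vetorepo(repo.__class__):
    #         def checkpush(self, pushop):
    #             super(vetorepo, self).checkpush(pushop)
    #             if pushop.force:
    #                 raise util.Abort('forced pushes are disabled here')
    #     repo.__class__ = vetorepo
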
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

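    # Illustrative sketch (not part of the original file): an extension can
    # register an outgoing check here, assuming util.hooks exposes an add()
    # method taking a source name and a callable. The function name
    # 'checkoutgoing' and its abort condition are hypothetical.
    #
    #     def checkoutgoing(repo, remote, outgoing):
    #         if len(outgoing.missing) > 1000:
    #             raise util.Abort('refusing to push more than 1000 changesets')
    #     repo.prepushoutgoinghooks.add('myextension', checkoutgoing)
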
    def push(self, remote, force=False, revs=None, newbranch=False):
        return exchange.push(self, remote, force, revs, newbranch)

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filters above 'served' are unlikely to be fetched
                    # from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True