bundle2: add an exchange.getbundle function...
Pierre-Yves David
r20954:dba91f80 default
@@ -1,554 +1,585 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
from node import hex, nullid
import cStringIO
import errno
import util, scmutil, changegroup, base85
import discovery, phases, obsolete, bookmarks, bundle2


class pushoperation(object):
    """An object that represents a single push operation

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # set of all heads common after changeset bundle push
        self.commonheads = None

def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _pushcheckoutgoing(pushop):
                _pushchangeset(pushop)
            _pushcomputecommonheads(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    _pushbookmark(pushop)
    return pushop.ret

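A minimal usage sketch of this entry point; the paths, the URL, and the hg.peer/hg.repository plumbing are illustrative context, not part of this change:

    from mercurial import exchange, hg, ui as uimod

    u = uimod.ui()
    repo = hg.repository(u, '/path/to/local/repo')     # repo we push from
    remote = hg.peer(u, {}, 'ssh://example.com/repo')  # repo we push to
    ret = exchange.push(repo, remote, newbranch=True)
    # ret: None = nothing to push, 0 = HTTP error, 1 = remote head count
    # unchanged (or push refused), other values per addchangegroup()
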
def _pushdiscovery(pushop):
    # discovery
    unfi = pushop.repo.unfiltered()
    fci = discovery.findcommonincoming
    commoninc = fci(unfi, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for the 80-char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for the i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are pushing, and there is at least one
            # obsolete or unstable changeset in missing, then at
            # least one of the missing heads will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            'push')
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push',
                                                  pushop.repo.url())

def _pushcomputecommonheads(pushop):
    unfi = pushop.repo.unfiltered()
    if pushop.ret:
        # push succeeded, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # all-out push failed. synchronize on all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads

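For intuition, the cheads described in the comment above can also be asked of the repository directly with a single revset; a purely illustrative restatement (the in-line computation avoids this extra walk by reusing the discovery results already in hand):

    out = pushop.outgoing
    cheads = [c.node() for c in
              pushop.repo.unfiltered().set('heads((::%ln) and (::%ln))',
                                           out.missingheads,
                                           out.commonheads)]
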
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    unfi = pushop.repo.unfiltered()
    cheads = pushop.commonheads
    if pushop.ret:
        # push succeeded, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # all-out push failed. synchronize on all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy, to avoid publishing changesets that may be
        # locally draft on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only repo
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # Get the list of all revs draft on remote but public here.
        # XXX Beware that this revset breaks if droots is not strictly
        # XXX roots; we may want to ensure it is, but that is costly
        outdated = unfi.set('heads((%ln::%ln) and public())',
                            droots, cheads)
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    if (obsolete._enabled and repo.obsstore and
        'obsolete' in remote.listkeys('namespaces')):
        rslts = []
        remotedata = repo.listkeys('obsolete')
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

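Markers travel over pushkey as base85-encoded 'dumpN' keys, and the reverse sort above guarantees dump0 is transmitted last. Illustratively, with two local dump keys the loop issues (data0/data1 stand in for the encoded payloads):

    remote.pushkey('obsolete', 'dump1', '', data1)  # sent first
    remote.pushkey('obsolete', 'dump0', '', data0)  # dump0 always sent last
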
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
     ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                           srchex=hex)

    for b, scid, dcid in advsrc:
        if ancestors and repo[scid].rev() not in ancestors:
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)

class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name of the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps remaining to do (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

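These three helpers implement a lazy open, commit-or-rollback contract; the intended calling pattern, which pull() below follows, is roughly:

    pullop = pulloperation(repo, remote)
    try:
        tr = pullop.gettransaction()  # opened lazily by steps that need it
        # ... apply changegroup, phases, obsolescence markers under tr ...
        pullop.closetransaction()     # commit, if a transaction was opened
    finally:
        pullop.releasetransaction()   # rollback unless already closed
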
def pull(repo, remote, heads=None, force=False):
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult

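As with push above, a hedged usage sketch (the URL is illustrative):

    from mercurial import exchange, hg, ui as uimod

    u = uimod.ui()
    repo = hg.repository(u, '/path/to/local/repo')      # repo we pull into
    remote = hg.peer(u, {}, 'http://example.com/repo')  # repo we pull from
    ret = exchange.pull(repo, remote, heads=None)       # None pulls everything
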
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; it will change to handle
    all discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    pullop.common, pullop.fetch, pullop.rheads = tmp

def _pullchangeset(pullop):
    """pull changesets from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't
    # open a transaction for nothing, which would break future useful
    # rollback calls
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())

def _pullphase(pullop):
    # Get remote phases data from remote
    pullop.todosteps.remove('phases')
    remotephases = pullop.remote.listkeys('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        phases.advanceboundary(pullop.repo, phases.public, pheads)
        phases.advanceboundary(pullop.repo, phases.draft,
                               pullop.pulledsubset)
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        phases.advanceboundary(pullop.repo, phases.public,
                               pullop.pulledsubset)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` method is used to get the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code
    that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr

def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
    """return a full bundle (with potentially multiple kinds of parts)

    Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
    passed. For now, the bundle can contain only changegroup parts, but this
    will change when more part types become available for bundle2.

    This is different from changegroup.getbundle, which only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when
    we have a clearer idea of the API with which we want to query different
    data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # build bundle here.
    cg = changegroup.getbundle(repo, source, heads=heads,
                               common=common, bundlecaps=None)
    if bundlecaps is None or 'HG20' not in bundlecaps:
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    bundler = bundle2.bundle20(repo.ui)
    tempname = changegroup.writebundle(cg, None, 'HG10UN')
    data = open(tempname).read()
    part = bundle2.part('changegroup', data=data)
    bundler.addpart(part)
    temp = cStringIO.StringIO()
    for c in bundler.getchunks():
        temp.write(c)
    temp.seek(0)
    return bundle2.unbundle20(repo.ui, temp)
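
A sketch of the two output modes, assuming an open repo (nullid comes from mercurial.node):

    from mercurial.node import nullid

    # no caps: a plain HG10 changegroup stream
    cg = getbundle(repo, 'pull', heads=repo.heads(), common=[nullid])

    # 'HG20' in bundlecaps: the same changegroup wrapped as a single
    # 'changegroup' part inside a bundle2.unbundle20 reader
    b2 = getbundle(repo, 'pull', heads=repo.heads(), common=[nullid],
                   bundlecaps=set(['HG20']))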
@@ -1,1870 +1,1870 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

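These decorators tie cached properties to files on disk; further down this class, localrepo's own changelog property uses @storecache('00changelog.i'). A hypothetical sketch of the pattern:

    class example(object):
        # hypothetical: recomputed only when .hg/store/somefile changes
        @storecache('somefile')
        def somedata(self):
            return self.svfs.read('somefile')
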
class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

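A hedged sketch of the decorator in use (the method is hypothetical; real users are destructive repo methods such as destroyed()):

    class examplerepo(localrepository):
        @unfilteredmethod
        def allrevcount(self):
            # runs on the unfiltered repo, so hidden revisions are counted
            return len(self.changelog)
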
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10'):
-        return changegroup.getbundle(self._repo, source, heads=heads,
+        return exchange.getbundle(self._repo, source, heads=heads,
                                     common=common, bundlecaps=bundlecaps)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

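The -/+ pair above is the substantive localrepo.py change in this commit: a local peer now builds its bundles through exchange.getbundle, so a bundle2-aware caller can receive an HG20 stream. A sketch (nullid from mercurial.node; the caps set is illustrative):

    peer = localpeer(repo)
    b2 = peer.getbundle('pull', heads=repo.heads(), common=[nullid],
                        bundlecaps=set(['HG20']))
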
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

149 class localrepository(object):
149 class localrepository(object):
150
150
151 supportedformats = set(('revlogv1', 'generaldelta'))
151 supportedformats = set(('revlogv1', 'generaldelta'))
152 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
152 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
153 'dotencode'))
153 'dotencode'))
154 openerreqs = set(('revlogv1', 'generaldelta'))
154 openerreqs = set(('revlogv1', 'generaldelta'))
155 requirements = ['revlogv1']
155 requirements = ['revlogv1']
156 filtername = None
156 filtername = None
157
157
158 # a list of (ui, featureset) functions.
158 # a list of (ui, featureset) functions.
159 # only functions defined in module of enabled extensions are invoked
159 # only functions defined in module of enabled extensions are invoked
160 featuresetupfuncs = set()
160 featuresetupfuncs = set()
161
161
162 def _baserequirements(self, create):
162 def _baserequirements(self, create):
163 return self.requirements[:]
163 return self.requirements[:]
164
164
165 def __init__(self, baseui, path=None, create=False):
165 def __init__(self, baseui, path=None, create=False):
166 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
166 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
167 self.wopener = self.wvfs
167 self.wopener = self.wvfs
168 self.root = self.wvfs.base
168 self.root = self.wvfs.base
169 self.path = self.wvfs.join(".hg")
169 self.path = self.wvfs.join(".hg")
170 self.origroot = path
170 self.origroot = path
171 self.auditor = pathutil.pathauditor(self.root, self._checknested)
171 self.auditor = pathutil.pathauditor(self.root, self._checknested)
172 self.vfs = scmutil.vfs(self.path)
172 self.vfs = scmutil.vfs(self.path)
173 self.opener = self.vfs
173 self.opener = self.vfs
174 self.baseui = baseui
174 self.baseui = baseui
175 self.ui = baseui.copy()
175 self.ui = baseui.copy()
176 self.ui.copy = baseui.copy # prevent copying repo configuration
176 self.ui.copy = baseui.copy # prevent copying repo configuration
177 # A list of callback to shape the phase if no data were found.
177 # A list of callback to shape the phase if no data were found.
178 # Callback are in the form: func(repo, roots) --> processed root.
178 # Callback are in the form: func(repo, roots) --> processed root.
179 # This list it to be filled by extension during repo setup
179 # This list it to be filled by extension during repo setup
180 self._phasedefaults = []
180 self._phasedefaults = []
181 try:
181 try:
182 self.ui.readconfig(self.join("hgrc"), self.root)
182 self.ui.readconfig(self.join("hgrc"), self.root)
183 extensions.loadall(self.ui)
183 extensions.loadall(self.ui)
184 except IOError:
184 except IOError:
185 pass
185 pass
186
186
187 if self.featuresetupfuncs:
187 if self.featuresetupfuncs:
188 self.supported = set(self._basesupported) # use private copy
188 self.supported = set(self._basesupported) # use private copy
189 extmods = set(m.__name__ for n, m
189 extmods = set(m.__name__ for n, m
190 in extensions.extensions(self.ui))
190 in extensions.extensions(self.ui))
191 for setupfunc in self.featuresetupfuncs:
191 for setupfunc in self.featuresetupfuncs:
192 if setupfunc.__module__ in extmods:
192 if setupfunc.__module__ in extmods:
193 setupfunc(self.ui, self.supported)
193 setupfunc(self.ui, self.supported)
194 else:
194 else:
        self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

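    # For illustration only: with the defaults applied above, a freshly
    # created repository typically ends up with a .hg/requires file along
    # these lines (the exact set also depends on _baserequirements, which
    # is defined elsewhere and is assumed here to contribute 'revlogv1'):
    #
    #     dotencode
    #     fncache
    #     revlogv1
    #     store
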
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

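    # A minimal usage sketch (hypothetical caller code, not part of this
    # module): the proxy class built above combines repoview's filtering
    # with whatever concrete repo class this happens to be, and the filter
    # name selects per-filter caches such as _branchcaches[self.filtername].
    #
    #     visible = repo.filtered('visible')  # e.g. hides filtered revisions
    #     heads = visible.branchmap()         # cached under 'visible'
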
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

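    # For example (illustrative values): with bookmarks 'work', 'work@default'
    # and 'other' in self._bookmarks, bookmarkheads('work') returns the nodes
    # of both 'work' and 'work@default', since divergent bookmarks share the
    # part of the name before '@'.
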
    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

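    # A minimal usage sketch (hypothetical caller code): formatspec quotes
    # the arguments, so values can be spliced into a revset safely.
    #
    #     for ctx in repo.set('branch(%s) and not public()', 'default'):
    #         repo.ui.write('%s\n' % ctx.hex())
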
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

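    # A minimal usage sketch (hypothetical caller code): tagging the current
    # tip with a global tag. Per the docstring above, the non-local form
    # commits a new changeset that updates .hgtags.
    #
    #     node = repo['tip'].node()
    #     repo.tag('v1.0', node, 'Added tag v1.0', False, None, None)
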
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

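    # For illustration (hypothetical session): given a committed tag 'v1.0'
    # and a local-only tag 'wip',
    #
    #     repo.tagtype('v1.0')    # -> 'global' (stored in .hgtags)
    #     repo.tagtype('wip')     # -> 'local'  (stored in .hg/localtags)
    #     repo.tagtype('nosuch')  # -> None
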
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

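    # Note on known(): a node is reported as known only if it is present in
    # the changelog *and* its phase is below secret, so secret changesets
    # stay hidden from peers during discovery. A hypothetical illustration:
    #
    #     repo.known([public_node, secret_node, missing_node])
    #     # -> [True, False, False]
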
    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

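    # For illustration: _loadfilter reads pattern/command pairs from the
    # named config section. A hypothetical hgrc that pipes text files
    # through external commands on checkin/checkout might look like:
    #
    #     [encode]
    #     **.txt = dos2unix
    #     [decode]
    #     **.txt = unix2dos
    #
    # A command of '!' disables filtering for that pattern, and a command
    # starting with a registered data filter name (see adddatafilter below)
    # is dispatched to that Python function instead of a shell pipe.
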
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        def onclose():
            self.store.write(tr)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        self._transref = weakref.ref(tr)
        return tr

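    # A minimal usage sketch (hypothetical caller code): the usual pattern
    # is to open a transaction under the store lock, close it on success,
    # and always release it so an aborted run can be recovered from the
    # journal written above.
    #
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction('my-operation')
    #         try:
    #             ...  # write to revlogs, phases, etc.
    #             tr.close()
    #         finally:
    #             tr.release()
    #     finally:
    #         lock.release()
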
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it does not
        always reread the dirstate. Use dirstate.invalidate() if you
        want to explicitly read the dirstate again (i.e. restoring it to
        a previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

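    # A minimal usage sketch (hypothetical caller code): when both locks are
    # needed, take wlock before lock and release in reverse order, mirroring
    # the pattern used by rollback() above.
    #
    #     wlock = repo.wlock()
    #     try:
    #         lock = repo.lock()
    #         try:
    #             ...  # mutate the store and the working copy
    #         finally:
    #             lock.release()
    #     finally:
    #         wlock.release()
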
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

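    # For illustration: when a rename is recorded above, the new filelog
    # revision carries copy metadata roughly like
    #
    #     meta = {'copy': 'foo', 'copyrev': '<40-hex filelog node of foo>'}
    #
    # with fparent1 set to nullid, signalling readers to consult the copy
    # data instead of the first parent.
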
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in changes[:3]:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

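    # Usage sketch (illustrative; message and user are hypothetical):
    #
    #   node = repo.commit(text='fix frobnication', user='alice')
    #   if node is None:
    #       repo.ui.status('nothing to commit\n')
    #
    # A None return means the working directory held no changes (and no
    # branch close or branch change), so no changeset was created.
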
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in the proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase is 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

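    # Hook-interaction sketch: 'pretxncommit' above fires while the new
    # changelog entry is still pending, so a hook can inspect and veto the
    # commit before the transaction closes. A minimal hgrc illustration
    # (hook name and module hypothetical):
    #
    #   [hooks]
    #   pretxncommit.checkmsg = python:checks.msghook
    #
    # Because the hook is run with throw=True, a failing hook raises before
    # tr.close() is reached and the changeset is rolled back with the
    # transaction.
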
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

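    # Usage sketch (illustrative): the return value is a 7-tuple of sorted
    # file lists, e.g.:
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(unknown=True, clean=True)
    #
    # unknown/ignored/clean are only populated when the corresponding
    # keyword argument is True; otherwise those lists come back empty.
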
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

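    # Sampling sketch for between(): walking the first-parent chain from
    # top down to bottom, nodes are kept at exponentially growing
    # distances. For a linear chain t = n9, n8, ..., n0 = b (names
    # hypothetical), between([(t, b)]) returns the nodes 1, 2, 4 and 8
    # steps below the top, so the number of samples stays logarithmic in
    # the length of the range.
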
    def pull(self, remote, heads=None, force=False):
        return exchange.pull(self, remote, heads, force)

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

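    # Extension sketch (hypothetical subclass name): an extension could
    # override checkpush to veto pushes, e.g.:
    #
    #   class vetorepo(localrepository):
    #       def checkpush(self, pushop):
    #           super(vetorepo, self).checkpush(pushop)
    #           if pushop.force:
    #               raise util.Abort('forced pushes are disabled here')
    #
    # pushop is the exchange.pushoperation object carrying the push state
    # (remote, force, revs, ...).
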
    def push(self, remote, force=False, revs=None, newbranch=False):
        return exchange.push(self, remote, force, revs, newbranch)

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible; filters above
                    # 'served' are unlikely to be fetched from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

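    # Wire-format sketch, as parsed above (byte layout inferred from the
    # reads, not from a separate spec): the stream_out response is
    #
    #   <status-code>\n                  0 = ok, 1 = forbidden, 2 = lock failed
    #   <total_files> <total_bytes>\n
    #   then, per file:
    #   <store-path>\0<size>\n  followed by exactly <size> raw bytes
    #
    # which is why filenames containing '\n' or '\r' are unsupported.
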
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

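    # Usage sketch (illustrative): pushkey namespaces are string-keyed
    # dictionaries; 'bookmarks' and 'phases' are the common ones, e.g.:
    #
    #   marks = repo.listkeys('bookmarks')   # {name: hex node}
    #   ok = repo.pushkey('bookmarks', 'stable', old_hex, new_hex)
    #
    # pushkey returns a truthy value on success, and the pre/post hooks
    # around both calls let extensions audit or veto the updates.
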
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
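
# Worked example for undoname (illustrative paths): the journal files
# written during a transaction are renamed to their rollback
# counterparts, so
#
#   undoname('.hg/store/journal')            -> '.hg/store/undo'
#   undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
#
# only the first 'journal' component of the basename is replaced.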

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True