clfilter: rename `unfilteredmeth` to `unfilteredmethod`...
Pierre-Yves David
r18016:2a393df0 default
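The change is a pure rename: the decorator that forces a method to run on the repository's unfiltered view keeps its exact behavior, only the abbreviated name is spelled out. A minimal runnable sketch (Python 2, matching this era of Mercurial; the `fakerepo` class is invented here purely for illustration and is not part of the commit):

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # re-dispatch the call to the unfiltered view of the repo
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

class fakerepo(object):
    # hypothetical stand-in for localrepository; only unfiltered() matters
    def unfiltered(self):
        return self  # a real repo would return its unfiltered proxy here

    @unfilteredmethod
    def status(self):
        return 'computed against the unfiltered repo'

print fakerepo().status()

Both hunks below show the same wrapper body on the old and new side; only the identifier `unfilteredmeth` becomes `unfilteredmethod`, at the definition and at each decorated call site.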
diff --git a/hgext/largefiles/reposetup.py b/hgext/largefiles/reposetup.py
@@ -1,479 +1,479 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''setup for largefiles repositories: reposetup'''
import copy
import types
import os

from mercurial import context, error, manifest, match as match_, util
from mercurial import node as node_
from mercurial.i18n import _
from mercurial import localrepo

import lfcommands
import proto
import lfutil

def reposetup(ui, repo):
    # wire repositories should be given new wireproto functions but not the
    # other largefiles modifications
    if not repo.local():
        return proto.wirereposetup(ui, repo)

    for name in ('status', 'commitctx', 'commit', 'push'):
        method = getattr(repo, name)
        if (isinstance(method, types.FunctionType) and
            method.func_name == 'wrap'):
            ui.warn(_('largefiles: repo method %r appears to have already been'
                      ' wrapped by another extension: '
                      'largefiles may behave incorrectly\n')
                    % name)

    class lfilesrepo(repo.__class__):
        lfstatus = False
        def status_nolfiles(self, *args, **kwargs):
            return super(lfilesrepo, self).status(*args, **kwargs)

        # When lfstatus is set, return a context that gives the names
        # of largefiles instead of their corresponding standins and
        # identifies the largefiles as always binary, regardless of
        # their actual contents.
        def __getitem__(self, changeid):
            ctx = super(lfilesrepo, self).__getitem__(changeid)
            if self.lfstatus:
                class lfilesmanifestdict(manifest.manifestdict):
                    def __contains__(self, filename):
                        if super(lfilesmanifestdict,
                                 self).__contains__(filename):
                            return True
                        return super(lfilesmanifestdict,
                                     self).__contains__(lfutil.standin(filename))
                class lfilesctx(ctx.__class__):
                    def files(self):
                        filenames = super(lfilesctx, self).files()
                        return [lfutil.splitstandin(f) or f for f in filenames]
                    def manifest(self):
                        man1 = super(lfilesctx, self).manifest()
                        man1.__class__ = lfilesmanifestdict
                        return man1
                    def filectx(self, path, fileid=None, filelog=None):
                        try:
                            if filelog is not None:
                                result = super(lfilesctx, self).filectx(
                                    path, fileid, filelog)
                            else:
                                result = super(lfilesctx, self).filectx(
                                    path, fileid)
                        except error.LookupError:
                            # Adding a null character will cause Mercurial to
                            # identify this as a binary file.
                            if filelog is not None:
                                result = super(lfilesctx, self).filectx(
                                    lfutil.standin(path), fileid, filelog)
                            else:
                                result = super(lfilesctx, self).filectx(
                                    lfutil.standin(path), fileid)
                            olddata = result.data
                            result.data = lambda: olddata() + '\0'
                        return result
                ctx.__class__ = lfilesctx
            return ctx

        # Figure out the status of big files and insert them into the
        # appropriate list in the result. Also removes standin files
        # from the listing. Revert to the original status if
        # self.lfstatus is False.
        # XXX large file status is buggy when used on repo proxy.
        # XXX this needs to be investigated.
-        @localrepo.unfilteredmeth
+        @localrepo.unfilteredmethod
        def status(self, node1='.', node2=None, match=None, ignored=False,
                   clean=False, unknown=False, listsubrepos=False):
            listignored, listclean, listunknown = ignored, clean, unknown
            if not self.lfstatus:
                return super(lfilesrepo, self).status(node1, node2, match,
                    listignored, listclean, listunknown, listsubrepos)
            else:
                # some calls in this function rely on the old version of status
                self.lfstatus = False
                if isinstance(node1, context.changectx):
                    ctx1 = node1
                else:
                    ctx1 = self[node1]
                if isinstance(node2, context.changectx):
                    ctx2 = node2
                else:
                    ctx2 = self[node2]
                working = ctx2.rev() is None
                parentworking = working and ctx1 == self['.']

                def inctx(file, ctx):
                    try:
                        if ctx.rev() is None:
                            return file in ctx.manifest()
                        ctx[file]
                        return True
                    except KeyError:
                        return False

                if match is None:
                    match = match_.always(self.root, self.getcwd())

                # First check if there were files specified on the
                # command line. If there were, and none of them were
                # largefiles, we should just bail here and let super
                # handle it -- thus gaining a big performance boost.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                if match.files() and not match.anypats():
                    for f in lfdirstate:
                        if match(f):
                            break
                    else:
                        return super(lfilesrepo, self).status(node1, node2,
                            match, listignored, listclean,
                            listunknown, listsubrepos)

                # Create a copy of match that matches standins instead
                # of largefiles.
                def tostandins(files):
                    if not working:
                        return files
                    newfiles = []
                    dirstate = self.dirstate
                    for f in files:
                        sf = lfutil.standin(f)
                        if sf in dirstate:
                            newfiles.append(sf)
                        elif sf in dirstate.dirs():
                            # Directory entries could be regular or
                            # standin, check both
                            newfiles.extend((f, sf))
                        else:
                            newfiles.append(f)
                    return newfiles

                # Create a function that we can use to override what is
                # normally the ignore matcher. We've already checked
                # for ignored files on the first dirstate walk, and
                # unnecessarily re-checking here causes a huge performance
                # hit because lfdirstate only knows about largefiles
                def _ignoreoverride(self):
                    return False

                m = copy.copy(match)
                m._files = tostandins(m._files)

                # Get ignored files here even if we weren't asked for them; we
                # must use the result here for filtering later
                result = super(lfilesrepo, self).status(node1, node2, m,
                    True, clean, unknown, listsubrepos)
                if working:
                    try:
                        # Any non-largefiles that were explicitly listed must be
                        # taken out or lfdirstate.status will report an error.
                        # The status of these files was already computed using
                        # super's status.
                        # Override lfdirstate's ignore matcher to not do
                        # anything
                        origignore = lfdirstate._ignore
                        lfdirstate._ignore = _ignoreoverride

                        def sfindirstate(f):
                            sf = lfutil.standin(f)
                            dirstate = self.dirstate
                            return sf in dirstate or sf in dirstate.dirs()
                        match._files = [f for f in match._files
                                        if sfindirstate(f)]
                        # Don't waste time getting the ignored and unknown
                        # files again; we already have them
                        s = lfdirstate.status(match, [], False,
                            listclean, False)
                        (unsure, modified, added, removed, missing, unknown,
                         ignored, clean) = s
                        # Replace the list of ignored and unknown files with
                        # the previously calculated lists, and strip out the
                        # largefiles
                        lfiles = set(lfdirstate._map)
                        ignored = set(result[5]).difference(lfiles)
                        unknown = set(result[4]).difference(lfiles)
                        if parentworking:
                            for lfile in unsure:
                                standin = lfutil.standin(lfile)
                                if standin not in ctx1:
                                    # from second parent
                                    modified.append(lfile)
                                elif ctx1[standin].data().strip() \
                                        != lfutil.hashfile(self.wjoin(lfile)):
                                    modified.append(lfile)
                                else:
                                    clean.append(lfile)
                                    lfdirstate.normal(lfile)
                        else:
                            tocheck = unsure + modified + added + clean
                            modified, added, clean = [], [], []

                            for lfile in tocheck:
                                standin = lfutil.standin(lfile)
                                if inctx(standin, ctx1):
                                    if ctx1[standin].data().strip() != \
                                            lfutil.hashfile(self.wjoin(lfile)):
                                        modified.append(lfile)
                                    else:
                                        clean.append(lfile)
                                else:
                                    added.append(lfile)
                    finally:
                        # Replace the original ignore function
                        lfdirstate._ignore = origignore

                    for standin in ctx1.manifest():
                        if not lfutil.isstandin(standin):
                            continue
                        lfile = lfutil.splitstandin(standin)
                        if not match(lfile):
                            continue
                        if lfile not in lfdirstate:
                            removed.append(lfile)

                    # Filter result lists
                    result = list(result)

                    # Largefiles are not really removed when they're
                    # still in the normal dirstate. Likewise, normal
                    # files are not really removed if it's still in
                    # lfdirstate. This happens in merges where files
                    # change type.
                    removed = [f for f in removed if f not in self.dirstate]
                    result[2] = [f for f in result[2] if f not in lfdirstate]

                    # Unknown files
                    unknown = set(unknown).difference(ignored)
                    result[4] = [f for f in unknown
                                 if (self.dirstate[f] == '?' and
                                     not lfutil.isstandin(f))]
                    # Ignored files were calculated earlier by the dirstate,
                    # and we already stripped out the largefiles from the list
                    result[5] = ignored
                    # combine normal files and largefiles
                    normals = [[fn for fn in filelist
                                if not lfutil.isstandin(fn)]
                               for filelist in result]
                    lfiles = (modified, added, removed, missing, [], [], clean)
                    result = [sorted(list1 + list2)
                              for (list1, list2) in zip(normals, lfiles)]
                else:
                    def toname(f):
                        if lfutil.isstandin(f):
                            return lfutil.splitstandin(f)
                        return f
                    result = [[toname(f) for f in items] for items in result]

                if not listunknown:
                    result[4] = []
                if not listignored:
                    result[5] = []
                if not listclean:
                    result[6] = []
                self.lfstatus = True
                return result

        # As part of committing, copy all of the largefiles into the
        # cache.
        def commitctx(self, *args, **kwargs):
            node = super(lfilesrepo, self).commitctx(*args, **kwargs)
            lfutil.copyalltostore(self, node)
            return node

        # Before commit, largefile standins have not had their
        # contents updated to reflect the hash of their largefile.
        # Do that here.
        def commit(self, text="", user=None, date=None, match=None,
                   force=False, editor=False, extra={}):
            orig = super(lfilesrepo, self).commit

            wlock = self.wlock()
            try:
                # Case 0: Rebase or Transplant
                # We have to take the time to pull down the new largefiles now.
                # Otherwise, any largefiles that were modified in the
                # destination changesets get overwritten, either by the rebase
                # or in the first commit after the rebase or transplant.
                # updatelfiles will update the dirstate to mark any pulled
                # largefiles as modified
                if getattr(self, "_isrebasing", False) or \
                        getattr(self, "_istransplanting", False):
                    lfcommands.updatelfiles(self.ui, self, filelist=None,
                                            printmessage=False)
                    result = orig(text=text, user=user, date=date, match=match,
                                  force=force, editor=editor, extra=extra)
                    return result
                # Case 1: user calls commit with no specific files or
                # include/exclude patterns: refresh and commit all files that
                # are "dirty".
                if ((match is None) or
                    (not match.anypats() and not match.files())):
                    # Spend a bit of time here to get a list of files we know
                    # are modified so we can compare only against those.
                    # It can cost a lot of time (several seconds)
                    # otherwise to update all standins if the largefiles are
                    # large.
                    lfdirstate = lfutil.openlfdirstate(ui, self)
                    dirtymatch = match_.always(self.root, self.getcwd())
                    s = lfdirstate.status(dirtymatch, [], False, False, False)
                    modifiedfiles = []
                    for i in s:
                        modifiedfiles.extend(i)
                    lfiles = lfutil.listlfiles(self)
                    # this only loops through largefiles that exist (not
                    # removed/renamed)
                    for lfile in lfiles:
                        if lfile in modifiedfiles:
                            if os.path.exists(
                                    self.wjoin(lfutil.standin(lfile))):
                                # this handles the case where a rebase is being
                                # performed and the working copy is not updated
                                # yet.
                                if os.path.exists(self.wjoin(lfile)):
                                    lfutil.updatestandin(self,
                                        lfutil.standin(lfile))
                                    lfdirstate.normal(lfile)

                    result = orig(text=text, user=user, date=date, match=match,
                                  force=force, editor=editor, extra=extra)

                    if result is not None:
                        for lfile in lfdirstate:
                            if lfile in modifiedfiles:
                                if (not os.path.exists(self.wjoin(
                                        lfutil.standin(lfile)))) or \
                                        (not os.path.exists(self.wjoin(lfile))):
                                    lfdirstate.drop(lfile)

                    # This needs to be after commit; otherwise precommit hooks
                    # get the wrong status
                    lfdirstate.write()
                    return result

                for f in match.files():
                    if lfutil.isstandin(f):
                        raise util.Abort(
                            _('file "%s" is a largefile standin') % f,
                            hint=('commit the largefile itself instead'))

                # Case 2: user calls commit with specified patterns: refresh
                # any matching big files.
                smatcher = lfutil.composestandinmatcher(self, match)
                standins = lfutil.dirstatewalk(self.dirstate, smatcher)

                # No matching big files: get out of the way and pass control to
                # the usual commit() method.
                if not standins:
                    return orig(text=text, user=user, date=date, match=match,
                                force=force, editor=editor, extra=extra)

                # Refresh all matching big files. It's possible that the
                # commit will end up failing, in which case the big files will
                # stay refreshed. No harm done: the user modified them and
                # asked to commit them, so sooner or later we're going to
                # refresh the standins. Might as well leave them refreshed.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                for standin in standins:
                    lfile = lfutil.splitstandin(standin)
                    if lfdirstate[lfile] <> 'r':
                        lfutil.updatestandin(self, standin)
                        lfdirstate.normal(lfile)
                    else:
                        lfdirstate.drop(lfile)

                # Cook up a new matcher that only matches regular files or
                # standins corresponding to the big files requested by the
                # user. Have to modify _files to prevent commit() from
                # complaining "not tracked" for big files.
                lfiles = lfutil.listlfiles(self)
                match = copy.copy(match)
                origmatchfn = match.matchfn

                # Check both the list of largefiles and the list of
                # standins because if a largefile was removed, it
                # won't be in the list of largefiles at this point
                match._files += sorted(standins)

                actualfiles = []
                for f in match._files:
                    fstandin = lfutil.standin(f)

                    # ignore known largefiles and standins
                    if f in lfiles or fstandin in standins:
                        continue

                    # append directory separator to avoid collisions
                    if not fstandin.endswith(os.sep):
                        fstandin += os.sep

                    actualfiles.append(f)
                match._files = actualfiles

                def matchfn(f):
                    if origmatchfn(f):
                        return f not in lfiles
                    else:
                        return f in standins

                match.matchfn = matchfn
                result = orig(text=text, user=user, date=date, match=match,
                              force=force, editor=editor, extra=extra)
                # This needs to be after commit; otherwise precommit hooks
                # get the wrong status
                lfdirstate.write()
                return result
            finally:
                wlock.release()

        def push(self, remote, force=False, revs=None, newbranch=False):
            o = lfutil.findoutgoing(self, remote, force)
            if o:
                toupload = set()
                o = self.changelog.nodesbetween(o, revs)[0]
                for n in o:
                    parents = [p for p in self.changelog.parents(n)
                               if p != node_.nullid]
                    ctx = self[n]
                    files = set(ctx.files())
                    if len(parents) == 2:
                        mc = ctx.manifest()
                        mp1 = ctx.parents()[0].manifest()
                        mp2 = ctx.parents()[1].manifest()
                        for f in mp1:
                            if f not in mc:
                                files.add(f)
                        for f in mp2:
                            if f not in mc:
                                files.add(f)
                        for f in mc:
                            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
                                    None):
                                files.add(f)

                    toupload = toupload.union(
                        set([ctx[f].data().strip()
                             for f in files
                             if lfutil.isstandin(f) and f in ctx]))
                lfcommands.uploadlfiles(ui, self, remote, toupload)
            return super(lfilesrepo, self).push(remote, force, revs,
                newbranch)

    repo.__class__ = lfilesrepo

    def checkrequireslfiles(ui, repo, **kwargs):
        if 'largefiles' not in repo.requirements and util.any(
                lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
            repo.requirements.add('largefiles')
            repo._writerequirements()

    ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
    ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py
@@ -1,2680 +1,2680 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        return super(unfilteredpropertycache, self).__get__(repo.unfiltered())

class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if an repo and a unfilteredproperty cached value for <name>"""
    return name in vars(repo.unfiltered())

-def unfilteredmeth(orig):
+def unfilteredmethod(orig):
55 """decorate method that always need to be run on unfiltered version"""
55 """decorate method that always need to be run on unfiltered version"""
56 def wrapper(repo, *args, **kwargs):
56 def wrapper(repo, *args, **kwargs):
57 return orig(repo.unfiltered(), *args, **kwargs)
57 return orig(repo.unfiltered(), *args, **kwargs)
58 return wrapper
58 return wrapper
59
59
60 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
60 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
61 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
61 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
62
62
63 class localpeer(peer.peerrepository):
63 class localpeer(peer.peerrepository):
64 '''peer for a local repo; reflects only the most recent API'''
64 '''peer for a local repo; reflects only the most recent API'''
65
65
66 def __init__(self, repo, caps=MODERNCAPS):
66 def __init__(self, repo, caps=MODERNCAPS):
67 peer.peerrepository.__init__(self)
67 peer.peerrepository.__init__(self)
68 self._repo = repo
68 self._repo = repo
69 self.ui = repo.ui
69 self.ui = repo.ui
70 self._caps = repo._restrictcapabilities(caps)
70 self._caps = repo._restrictcapabilities(caps)
71 self.requirements = repo.requirements
71 self.requirements = repo.requirements
72 self.supportedformats = repo.supportedformats
72 self.supportedformats = repo.supportedformats
73
73
74 def close(self):
74 def close(self):
75 self._repo.close()
75 self._repo.close()
76
76
77 def _capabilities(self):
77 def _capabilities(self):
78 return self._caps
78 return self._caps
79
79
80 def local(self):
80 def local(self):
81 return self._repo
81 return self._repo
82
82
83 def canpush(self):
83 def canpush(self):
84 return True
84 return True
85
85
86 def url(self):
86 def url(self):
87 return self._repo.url()
87 return self._repo.url()
88
88
89 def lookup(self, key):
89 def lookup(self, key):
90 return self._repo.lookup(key)
90 return self._repo.lookup(key)
91
91
92 def branchmap(self):
92 def branchmap(self):
93 return discovery.visiblebranchmap(self._repo)
93 return discovery.visiblebranchmap(self._repo)
94
94
95 def heads(self):
95 def heads(self):
96 return discovery.visibleheads(self._repo)
96 return discovery.visibleheads(self._repo)
97
97
98 def known(self, nodes):
98 def known(self, nodes):
99 return self._repo.known(nodes)
99 return self._repo.known(nodes)
100
100
101 def getbundle(self, source, heads=None, common=None):
101 def getbundle(self, source, heads=None, common=None):
102 return self._repo.getbundle(source, heads=heads, common=common)
102 return self._repo.getbundle(source, heads=heads, common=common)
103
103
104 # TODO We might want to move the next two calls into legacypeer and add
104 # TODO We might want to move the next two calls into legacypeer and add
105 # unbundle instead.
105 # unbundle instead.
106
106
107 def lock(self):
107 def lock(self):
108 return self._repo.lock()
108 return self._repo.lock()
109
109
110 def addchangegroup(self, cg, source, url):
110 def addchangegroup(self, cg, source, url):
111 return self._repo.addchangegroup(cg, source, url)
111 return self._repo.addchangegroup(cg, source, url)
112
112
113 def pushkey(self, namespace, key, old, new):
113 def pushkey(self, namespace, key, old, new):
114 return self._repo.pushkey(namespace, key, old, new)
114 return self._repo.pushkey(namespace, key, old, new)
115
115
116 def listkeys(self, namespace):
116 def listkeys(self, namespace):
117 return self._repo.listkeys(namespace)
117 return self._repo.listkeys(namespace)
118
118
119 def debugwireargs(self, one, two, three=None, four=None, five=None):
119 def debugwireargs(self, one, two, three=None, four=None, five=None):
120 '''used to test argument passing over the wire'''
120 '''used to test argument passing over the wire'''
121 return "%s %s %s %s %s" % (one, two, three, four, five)
121 return "%s %s %s %s %s" % (one, two, three, four, five)
122
122
123 class locallegacypeer(localpeer):
123 class locallegacypeer(localpeer):
124 '''peer extension which implements legacy methods too; used for tests with
124 '''peer extension which implements legacy methods too; used for tests with
125 restricted capabilities'''
125 restricted capabilities'''
126
126
127 def __init__(self, repo):
127 def __init__(self, repo):
128 localpeer.__init__(self, repo, caps=LEGACYCAPS)
128 localpeer.__init__(self, repo, caps=LEGACYCAPS)
129
129
130 def branches(self, nodes):
130 def branches(self, nodes):
131 return self._repo.branches(nodes)
131 return self._repo.branches(nodes)
132
132
133 def between(self, pairs):
133 def between(self, pairs):
134 return self._repo.between(pairs)
134 return self._repo.between(pairs)
135
135
136 def changegroup(self, basenodes, source):
136 def changegroup(self, basenodes, source):
137 return self._repo.changegroup(basenodes, source)
137 return self._repo.changegroup(basenodes, source)
138
138
139 def changegroupsubset(self, bases, heads, source):
139 def changegroupsubset(self, bases, heads, source):
140 return self._repo.changegroupsubset(bases, heads, source)
140 return self._repo.changegroupsubset(bases, heads, source)
141
141
142 class localrepository(object):
142 class localrepository(object):
143
143
144 supportedformats = set(('revlogv1', 'generaldelta'))
144 supportedformats = set(('revlogv1', 'generaldelta'))
145 supported = supportedformats | set(('store', 'fncache', 'shared',
145 supported = supportedformats | set(('store', 'fncache', 'shared',
146 'dotencode'))
146 'dotencode'))
147 openerreqs = set(('revlogv1', 'generaldelta'))
147 openerreqs = set(('revlogv1', 'generaldelta'))
148 requirements = ['revlogv1']
148 requirements = ['revlogv1']
149
149
150 def _baserequirements(self, create):
150 def _baserequirements(self, create):
151 return self.requirements[:]
151 return self.requirements[:]
152
152
153 def __init__(self, baseui, path=None, create=False):
153 def __init__(self, baseui, path=None, create=False):
154 self.wvfs = scmutil.vfs(path, expand=True)
154 self.wvfs = scmutil.vfs(path, expand=True)
155 self.wopener = self.wvfs
155 self.wopener = self.wvfs
156 self.root = self.wvfs.base
156 self.root = self.wvfs.base
157 self.path = self.wvfs.join(".hg")
157 self.path = self.wvfs.join(".hg")
158 self.origroot = path
158 self.origroot = path
159 self.auditor = scmutil.pathauditor(self.root, self._checknested)
159 self.auditor = scmutil.pathauditor(self.root, self._checknested)
160 self.vfs = scmutil.vfs(self.path)
160 self.vfs = scmutil.vfs(self.path)
161 self.opener = self.vfs
161 self.opener = self.vfs
162 self.baseui = baseui
162 self.baseui = baseui
163 self.ui = baseui.copy()
163 self.ui = baseui.copy()
164 # A list of callback to shape the phase if no data were found.
164 # A list of callback to shape the phase if no data were found.
165 # Callback are in the form: func(repo, roots) --> processed root.
165 # Callback are in the form: func(repo, roots) --> processed root.
166 # This list it to be filled by extension during repo setup
166 # This list it to be filled by extension during repo setup
167 self._phasedefaults = []
167 self._phasedefaults = []
168 try:
168 try:
169 self.ui.readconfig(self.join("hgrc"), self.root)
169 self.ui.readconfig(self.join("hgrc"), self.root)
170 extensions.loadall(self.ui)
170 extensions.loadall(self.ui)
171 except IOError:
171 except IOError:
172 pass
172 pass
173
173
174 if not self.vfs.isdir():
174 if not self.vfs.isdir():
175 if create:
175 if create:
176 if not self.wvfs.exists():
176 if not self.wvfs.exists():
177 self.wvfs.makedirs()
177 self.wvfs.makedirs()
178 self.vfs.makedir(notindexed=True)
178 self.vfs.makedir(notindexed=True)
179 requirements = self._baserequirements(create)
179 requirements = self._baserequirements(create)
180 if self.ui.configbool('format', 'usestore', True):
180 if self.ui.configbool('format', 'usestore', True):
181 self.vfs.mkdir("store")
181 self.vfs.mkdir("store")
182 requirements.append("store")
182 requirements.append("store")
183 if self.ui.configbool('format', 'usefncache', True):
183 if self.ui.configbool('format', 'usefncache', True):
184 requirements.append("fncache")
184 requirements.append("fncache")
185 if self.ui.configbool('format', 'dotencode', True):
185 if self.ui.configbool('format', 'dotencode', True):
186 requirements.append('dotencode')
186 requirements.append('dotencode')
187 # create an invalid changelog
187 # create an invalid changelog
188 self.vfs.append(
188 self.vfs.append(
189 "00changelog.i",
189 "00changelog.i",
190 '\0\0\0\2' # represents revlogv2
190 '\0\0\0\2' # represents revlogv2
191 ' dummy changelog to prevent using the old repo layout'
191 ' dummy changelog to prevent using the old repo layout'
192 )
192 )
193 if self.ui.configbool('format', 'generaldelta', False):
193 if self.ui.configbool('format', 'generaldelta', False):
194 requirements.append("generaldelta")
194 requirements.append("generaldelta")
195 requirements = set(requirements)
195 requirements = set(requirements)
196 else:
196 else:
197 raise error.RepoError(_("repository %s not found") % path)
197 raise error.RepoError(_("repository %s not found") % path)
198 elif create:
198 elif create:
199 raise error.RepoError(_("repository %s already exists") % path)
199 raise error.RepoError(_("repository %s already exists") % path)
200 else:
200 else:
201 try:
201 try:
202 requirements = scmutil.readrequires(self.vfs, self.supported)
202 requirements = scmutil.readrequires(self.vfs, self.supported)
203 except IOError, inst:
203 except IOError, inst:
204 if inst.errno != errno.ENOENT:
204 if inst.errno != errno.ENOENT:
205 raise
205 raise
206 requirements = set()
206 requirements = set()
207
207
208 self.sharedpath = self.path
208 self.sharedpath = self.path
209 try:
209 try:
210 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
210 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
211 if not os.path.exists(s):
211 if not os.path.exists(s):
212 raise error.RepoError(
212 raise error.RepoError(
213 _('.hg/sharedpath points to nonexistent directory %s') % s)
213 _('.hg/sharedpath points to nonexistent directory %s') % s)
214 self.sharedpath = s
214 self.sharedpath = s
215 except IOError, inst:
215 except IOError, inst:
216 if inst.errno != errno.ENOENT:
216 if inst.errno != errno.ENOENT:
217 raise
217 raise
218
218
219 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
219 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
220 self.spath = self.store.path
220 self.spath = self.store.path
221 self.svfs = self.store.vfs
221 self.svfs = self.store.vfs
222 self.sopener = self.svfs
222 self.sopener = self.svfs
223 self.sjoin = self.store.join
223 self.sjoin = self.store.join
224 self.vfs.createmode = self.store.createmode
224 self.vfs.createmode = self.store.createmode
225 self._applyrequirements(requirements)
225 self._applyrequirements(requirements)
226 if create:
226 if create:
227 self._writerequirements()
227 self._writerequirements()
228
228
229
229
230 self._branchcache = None
230 self._branchcache = None
231 self._branchcachetip = None
231 self._branchcachetip = None
232 self.filterpats = {}
232 self.filterpats = {}
233 self._datafilters = {}
233 self._datafilters = {}
234 self._transref = self._lockref = self._wlockref = None
234 self._transref = self._lockref = self._wlockref = None
235
235
236 # A cache for various files under .hg/ that tracks file changes,
236 # A cache for various files under .hg/ that tracks file changes,
237 # (used by the filecache decorator)
237 # (used by the filecache decorator)
238 #
238 #
239 # Maps a property name to its util.filecacheentry
239 # Maps a property name to its util.filecacheentry
240 self._filecache = {}
240 self._filecache = {}
241
241
242 def close(self):
242 def close(self):
243 pass
243 pass
244
244
245 def _restrictcapabilities(self, caps):
245 def _restrictcapabilities(self, caps):
246 return caps
246 return caps
247
247
248 def _applyrequirements(self, requirements):
248 def _applyrequirements(self, requirements):
249 self.requirements = requirements
249 self.requirements = requirements
250 self.sopener.options = dict((r, 1) for r in requirements
250 self.sopener.options = dict((r, 1) for r in requirements
251 if r in self.openerreqs)
251 if r in self.openerreqs)
252
252
253 def _writerequirements(self):
253 def _writerequirements(self):
254 reqfile = self.opener("requires", "w")
254 reqfile = self.opener("requires", "w")
255 for r in self.requirements:
255 for r in self.requirements:
256 reqfile.write("%s\n" % r)
256 reqfile.write("%s\n" % r)
257 reqfile.close()
257 reqfile.close()
258
258
259 def _checknested(self, path):
259 def _checknested(self, path):
260 """Determine if path is a legal nested repository."""
260 """Determine if path is a legal nested repository."""
261 if not path.startswith(self.root):
261 if not path.startswith(self.root):
262 return False
262 return False
263 subpath = path[len(self.root) + 1:]
263 subpath = path[len(self.root) + 1:]
264 normsubpath = util.pconvert(subpath)
264 normsubpath = util.pconvert(subpath)
265
265
266 # XXX: Checking against the current working copy is wrong in
266 # XXX: Checking against the current working copy is wrong in
267 # the sense that it can reject things like
267 # the sense that it can reject things like
268 #
268 #
269 # $ hg cat -r 10 sub/x.txt
269 # $ hg cat -r 10 sub/x.txt
270 #
270 #
271 # if sub/ is no longer a subrepository in the working copy
271 # if sub/ is no longer a subrepository in the working copy
272 # parent revision.
272 # parent revision.
273 #
273 #
274 # However, it can of course also allow things that would have
274 # However, it can of course also allow things that would have
275 # been rejected before, such as the above cat command if sub/
275 # been rejected before, such as the above cat command if sub/
276 # is a subrepository now, but was a normal directory before.
276 # is a subrepository now, but was a normal directory before.
277 # The old path auditor would have rejected by mistake since it
277 # The old path auditor would have rejected by mistake since it
278 # panics when it sees sub/.hg/.
278 # panics when it sees sub/.hg/.
279 #
279 #
280 # All in all, checking against the working copy seems sensible
280 # All in all, checking against the working copy seems sensible
281 # since we want to prevent access to nested repositories on
281 # since we want to prevent access to nested repositories on
282 # the filesystem *now*.
282 # the filesystem *now*.
283 ctx = self[None]
283 ctx = self[None]
284 parts = util.splitpath(subpath)
284 parts = util.splitpath(subpath)
285 while parts:
285 while parts:
286 prefix = '/'.join(parts)
286 prefix = '/'.join(parts)
287 if prefix in ctx.substate:
287 if prefix in ctx.substate:
288 if prefix == normsubpath:
288 if prefix == normsubpath:
289 return True
289 return True
290 else:
290 else:
291 sub = ctx.sub(prefix)
291 sub = ctx.sub(prefix)
292 return sub.checknested(subpath[len(prefix) + 1:])
292 return sub.checknested(subpath[len(prefix) + 1:])
293 else:
293 else:
294 parts.pop()
294 parts.pop()
295 return False
295 return False
296
296
297 def peer(self):
297 def peer(self):
298 return localpeer(self) # not cached to avoid reference cycle
298 return localpeer(self) # not cached to avoid reference cycle
299
299
300 def unfiltered(self):
300 def unfiltered(self):
301 """Return unfiltered version of the repository
301 """Return unfiltered version of the repository
302
302
303 Intended to be ovewritten by filtered repo."""
303 Intended to be ovewritten by filtered repo."""
304 return self
304 return self
305
305
306 @repofilecache('bookmarks')
306 @repofilecache('bookmarks')
307 def _bookmarks(self):
307 def _bookmarks(self):
308 return bookmarks.bmstore(self)
308 return bookmarks.bmstore(self)
309
309
310 @repofilecache('bookmarks.current')
310 @repofilecache('bookmarks.current')
311 def _bookmarkcurrent(self):
311 def _bookmarkcurrent(self):
312 return bookmarks.readcurrent(self)
312 return bookmarks.readcurrent(self)
313
313
314 def bookmarkheads(self, bookmark):
314 def bookmarkheads(self, bookmark):
315 name = bookmark.split('@', 1)[0]
315 name = bookmark.split('@', 1)[0]
316 heads = []
316 heads = []
317 for mark, n in self._bookmarks.iteritems():
317 for mark, n in self._bookmarks.iteritems():
318 if mark.split('@', 1)[0] == name:
318 if mark.split('@', 1)[0] == name:
319 heads.append(n)
319 heads.append(n)
320 return heads
320 return heads
321
321
322 @storecache('phaseroots')
322 @storecache('phaseroots')
323 def _phasecache(self):
323 def _phasecache(self):
324 return phases.phasecache(self, self._phasedefaults)
324 return phases.phasecache(self, self._phasedefaults)
325
325
326 @storecache('obsstore')
326 @storecache('obsstore')
327 def obsstore(self):
327 def obsstore(self):
328 store = obsolete.obsstore(self.sopener)
328 store = obsolete.obsstore(self.sopener)
329 if store and not obsolete._enabled:
329 if store and not obsolete._enabled:
330 # message is rare enough to not be translated
330 # message is rare enough to not be translated
331 msg = 'obsolete feature not enabled but %i markers found!\n'
331 msg = 'obsolete feature not enabled but %i markers found!\n'
332 self.ui.warn(msg % len(list(store)))
332 self.ui.warn(msg % len(list(store)))
333 return store
333 return store
334
334
335 @unfilteredpropertycache
335 @unfilteredpropertycache
336 def hiddenrevs(self):
336 def hiddenrevs(self):
337 """hiddenrevs: revs that should be hidden by command and tools
337 """hiddenrevs: revs that should be hidden by command and tools
338
338
339 This set is carried on the repo to ease initialization and lazy
339 This set is carried on the repo to ease initialization and lazy
340 loading; it'll probably move back to changelog for efficiency and
340 loading; it'll probably move back to changelog for efficiency and
341 consistency reasons.
341 consistency reasons.
342
342
343 Note that the hiddenrevs will needs invalidations when
343 Note that the hiddenrevs will needs invalidations when
344 - a new changesets is added (possible unstable above extinct)
344 - a new changesets is added (possible unstable above extinct)
345 - a new obsolete marker is added (possible new extinct changeset)
345 - a new obsolete marker is added (possible new extinct changeset)
346
346
347 hidden changesets cannot have non-hidden descendants
347 hidden changesets cannot have non-hidden descendants
348 """
348 """
349 hidden = set()
349 hidden = set()
350 if self.obsstore:
350 if self.obsstore:
351 ### hide extinct changeset that are not accessible by any mean
351 ### hide extinct changeset that are not accessible by any mean
352 hiddenquery = 'extinct() - ::(. + bookmark())'
352 hiddenquery = 'extinct() - ::(. + bookmark())'
353 hidden.update(self.revs(hiddenquery))
353 hidden.update(self.revs(hiddenquery))
354 return hidden
354 return hidden
355
355
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

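    # Illustrative sketch, not part of localrepo.py: a hypothetical helper
    # showing how revs()/set() are meant to be driven.  revset.formatspec()
    # expands and quotes the printf-style placeholders, so callers never
    # splice revset strings together by hand.
    def _demo_revsets(self):
        # walk the non-merge descendants of revision 0, oldest first
        for ctx in self.set('%d:: and not merge()', 0):
            self.ui.write('%d: %s\n' % (ctx.rev(), ctx.description()))
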
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be
        a string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

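    # Illustrative sketch, not part of localrepo.py: the two calling modes of
    # tag().  A hypothetical helper; node is a binary changeset id, and a
    # date of None means "now".
    def _demo_tag(self, node, user):
        # global tag: rewrites .hgtags and commits the change
        self.tag(['v1.0'], node, 'Added tag v1.0', False, user, None)
        # local tag: only touches .hg/localtags, nothing is committed
        self.tag('wip', node, '', True, user, None)
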
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like 'global' or 'local'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    @unfilteredmethod # Until we get smarter cache management
    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        if self.changelog.filteredrevs:
            # some changesets are excluded; we can't use the cache
            branchmap = {}
            self._updatebranchcache(branchmap, (self[r] for r in self))
            return branchmap
        else:
            self.updatebranchcache()
            return self._branchcache

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

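    # Illustrative sketch, not part of localrepo.py: consuming branchmap().
    # A hypothetical helper printing each branch with its tipmost open head,
    # mirroring what branchtags() computes via _branchtip().
    def _demo_branches(self):
        for branch, heads in self.branchmap().iteritems():
            self.ui.write('%s: %s\n' % (branch, hex(self._branchtip(heads))))
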
    @unfilteredmethod # Until we get smarter cache management
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if node not in self:
                    raise ValueError('invalidating branch cache because node '
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    @unfilteredmethod # Until we get smarter cache management
    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    @unfilteredmethod # Until we get smarter cache management
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset
        of heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code
            # somewhat recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are
            # already in bheadrevs. This can happen if you strip a node whose
            # parent was already a head (because they're on different
            # branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are
            # not heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in
        # the branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

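    # Illustrative sketch, not part of localrepo.py: the pruning rule used by
    # _updatebranchcache() above, restated over a toy {rev: [parentrevs]}
    # map.  A candidate stays a head only while no other candidate descends
    # from it; ancestors of a newer head are dropped, as in the iterrevs loop.
    @staticmethod
    def _demo_prune(parents, headrevs):
        def ancestors(rev):
            seen, stack = set(), [rev]
            while stack:
                for p in parents.get(stack.pop(), []):
                    if p not in seen:
                        seen.add(p)
                        stack.append(p)
            return seen
        heads = list(headrevs)
        for rev in sorted(headrevs, reverse=True):
            if rev in heads:
                anc = ancestors(rev)
                heads = [h for h in heads if h not in anc]
        return heads
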
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

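    # Illustrative sketch, not part of localrepo.py: how an extension plugs
    # into this filter machinery (win32text does the same).  Given the
    # hypothetical hgrc entry "[encode] **.txt = demo:", _loadfilter('encode')
    # matches the 'demo:' prefix and wread() then routes .txt data through it.
    def _demo_registerfilter(self):
        def toupper(s, cmd, ui=None, repo=None, filename=None):
            return s.upper()
        self.adddatafilter('demo:', toupper)
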
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

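    # Illustrative sketch, not part of localrepo.py: the standard calling
    # convention for transaction().  close() makes the journal permanent;
    # release() without a prior close() takes the rollback path instead.
    def _demo_transaction(self):
        tr = self.transaction('demo')
        try:
            # ... append to changelog/manifest/filelogs through tr ...
            tr.close()
        finally:
            tr.release()
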
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from
        # being invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcache = None # in UTF-8
        self.unfiltered()._branchcachetip = None
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all filecaches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname,
                          int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

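    # Illustrative sketch, not part of localrepo.py: lock ordering.  Callers
    # needing both locks must take wlock() before lock(), as rollback() above
    # does, and release them in reverse order via release().
    def _demo_lockedwrite(self):
        wlock = self.wlock()
        lock = self.lock()
        try:
            pass # mutate the store and the working directory here
        finally:
            release(lock, wlock)
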
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
1323 continue
1323 continue
1324 if not force:
1324 if not force:
1325 raise util.Abort(
1325 raise util.Abort(
1326 _("commit with new subrepo %s excluded") % s)
1326 _("commit with new subrepo %s excluded") % s)
1327 if wctx.sub(s).dirty(True):
1327 if wctx.sub(s).dirty(True):
1328 if not self.ui.configbool('ui', 'commitsubrepos'):
1328 if not self.ui.configbool('ui', 'commitsubrepos'):
1329 raise util.Abort(
1329 raise util.Abort(
1330 _("uncommitted changes in subrepo %s") % s,
1330 _("uncommitted changes in subrepo %s") % s,
1331 hint=_("use --subrepos for recursive commit"))
1331 hint=_("use --subrepos for recursive commit"))
1332 subs.append(s)
1332 subs.append(s)
1333 commitsubs.add(s)
1333 commitsubs.add(s)
1334 else:
1334 else:
1335 bs = wctx.sub(s).basestate()
1335 bs = wctx.sub(s).basestate()
1336 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1336 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1337 if oldstate.get(s, (None, None, None))[1] != bs:
1337 if oldstate.get(s, (None, None, None))[1] != bs:
1338 subs.append(s)
1338 subs.append(s)
1339
1339
1340 # check for removed subrepos
1340 # check for removed subrepos
1341 for p in wctx.parents():
1341 for p in wctx.parents():
1342 r = [s for s in p.substate if s not in newstate]
1342 r = [s for s in p.substate if s not in newstate]
1343 subs += [s for s in r if match(s)]
1343 subs += [s for s in r if match(s)]
1344 if subs:
1344 if subs:
1345 if (not match('.hgsub') and
1345 if (not match('.hgsub') and
1346 '.hgsub' in (wctx.modified() + wctx.added())):
1346 '.hgsub' in (wctx.modified() + wctx.added())):
1347 raise util.Abort(
1347 raise util.Abort(
1348 _("can't commit subrepos without .hgsub"))
1348 _("can't commit subrepos without .hgsub"))
1349 changes[0].insert(0, '.hgsubstate')
1349 changes[0].insert(0, '.hgsubstate')
1350
1350
1351 elif '.hgsub' in changes[2]:
1351 elif '.hgsub' in changes[2]:
1352 # clean up .hgsubstate when .hgsub is removed
1352 # clean up .hgsubstate when .hgsub is removed
1353 if ('.hgsubstate' in wctx and
1353 if ('.hgsubstate' in wctx and
1354 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1354 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1355 changes[2].insert(0, '.hgsubstate')
1355 changes[2].insert(0, '.hgsubstate')
1356
1356
1357 # make sure all explicit patterns are matched
1357 # make sure all explicit patterns are matched
1358 if not force and match.files():
1358 if not force and match.files():
1359 matched = set(changes[0] + changes[1] + changes[2])
1359 matched = set(changes[0] + changes[1] + changes[2])
1360
1360
1361 for f in match.files():
1361 for f in match.files():
1362 f = self.dirstate.normalize(f)
1362 f = self.dirstate.normalize(f)
1363 if f == '.' or f in matched or f in wctx.substate:
1363 if f == '.' or f in matched or f in wctx.substate:
1364 continue
1364 continue
1365 if f in changes[3]: # missing
1365 if f in changes[3]: # missing
1366 fail(f, _('file not found!'))
1366 fail(f, _('file not found!'))
1367 if f in vdirs: # visited directory
1367 if f in vdirs: # visited directory
1368 d = f + '/'
1368 d = f + '/'
1369 for mf in matched:
1369 for mf in matched:
1370 if mf.startswith(d):
1370 if mf.startswith(d):
1371 break
1371 break
1372 else:
1372 else:
1373 fail(f, _("no match under directory!"))
1373 fail(f, _("no match under directory!"))
1374 elif f not in self.dirstate:
1374 elif f not in self.dirstate:
1375 fail(f, _("file not tracked!"))
1375 fail(f, _("file not tracked!"))
1376
1376
1377 if (not force and not extra.get("close") and not merge
1377 if (not force and not extra.get("close") and not merge
1378 and not (changes[0] or changes[1] or changes[2])
1378 and not (changes[0] or changes[1] or changes[2])
1379 and wctx.branch() == wctx.p1().branch()):
1379 and wctx.branch() == wctx.p1().branch()):
1380 return None
1380 return None
1381
1381
1382 if merge and changes[3]:
1382 if merge and changes[3]:
1383 raise util.Abort(_("cannot commit merge with missing files"))
1383 raise util.Abort(_("cannot commit merge with missing files"))
1384
1384
1385 ms = mergemod.mergestate(self)
1385 ms = mergemod.mergestate(self)
1386 for f in changes[0]:
1386 for f in changes[0]:
1387 if f in ms and ms[f] == 'u':
1387 if f in ms and ms[f] == 'u':
1388 raise util.Abort(_("unresolved merge conflicts "
1388 raise util.Abort(_("unresolved merge conflicts "
1389 "(see hg help resolve)"))
1389 "(see hg help resolve)"))
1390
1390
1391 cctx = context.workingctx(self, text, user, date, extra, changes)
1391 cctx = context.workingctx(self, text, user, date, extra, changes)
1392 if editor:
1392 if editor:
1393 cctx._text = editor(self, cctx, subs)
1393 cctx._text = editor(self, cctx, subs)
1394 edited = (text != cctx._text)
1394 edited = (text != cctx._text)
1395
1395
1396 # commit subs and write new state
1396 # commit subs and write new state
1397 if subs:
1397 if subs:
1398 for s in sorted(commitsubs):
1398 for s in sorted(commitsubs):
1399 sub = wctx.sub(s)
1399 sub = wctx.sub(s)
1400 self.ui.status(_('committing subrepository %s\n') %
1400 self.ui.status(_('committing subrepository %s\n') %
1401 subrepo.subrelpath(sub))
1401 subrepo.subrelpath(sub))
1402 sr = sub.commit(cctx._text, user, date)
1402 sr = sub.commit(cctx._text, user, date)
1403 newstate[s] = (newstate[s][0], sr)
1403 newstate[s] = (newstate[s][0], sr)
1404 subrepo.writestate(self, newstate)
1404 subrepo.writestate(self, newstate)
1405
1405
1406 # Save commit message in case this transaction gets rolled back
1406 # Save commit message in case this transaction gets rolled back
1407 # (e.g. by a pretxncommit hook). Leave the content alone on
1407 # (e.g. by a pretxncommit hook). Leave the content alone on
1408 # the assumption that the user will use the same editor again.
1408 # the assumption that the user will use the same editor again.
1409 msgfn = self.savecommitmessage(cctx._text)
1409 msgfn = self.savecommitmessage(cctx._text)
1410
1410
1411 p1, p2 = self.dirstate.parents()
1411 p1, p2 = self.dirstate.parents()
1412 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1412 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1413 try:
1413 try:
1414 self.hook("precommit", throw=True, parent1=hookp1,
1414 self.hook("precommit", throw=True, parent1=hookp1,
1415 parent2=hookp2)
1415 parent2=hookp2)
1416 ret = self.commitctx(cctx, True)
1416 ret = self.commitctx(cctx, True)
1417 except: # re-raises
1417 except: # re-raises
1418 if edited:
1418 if edited:
1419 self.ui.write(
1419 self.ui.write(
1420 _('note: commit message saved in %s\n') % msgfn)
1420 _('note: commit message saved in %s\n') % msgfn)
1421 raise
1421 raise
1422
1422
1423 # update bookmarks, dirstate and mergestate
1423 # update bookmarks, dirstate and mergestate
1424 bookmarks.update(self, [p1, p2], ret)
1424 bookmarks.update(self, [p1, p2], ret)
1425 for f in changes[0] + changes[1]:
1425 for f in changes[0] + changes[1]:
1426 self.dirstate.normal(f)
1426 self.dirstate.normal(f)
1427 for f in changes[2]:
1427 for f in changes[2]:
1428 self.dirstate.drop(f)
1428 self.dirstate.drop(f)
1429 self.dirstate.setparents(ret)
1429 self.dirstate.setparents(ret)
1430 ms.reset()
1430 ms.reset()
1431 finally:
1431 finally:
1432 wlock.release()
1432 wlock.release()
1433
1433
1434 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1434 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1435 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1435 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1436 self._afterlock(commithook)
1436 self._afterlock(commithook)
1437 return ret
1437 return ret
1438
1438
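    # Editor's note (not part of the original file): a minimal sketch of how
    # commit() above might be driven programmatically, assuming `repo` is a
    # localrepository whose working directory has pending changes. The
    # pattern string and commit message are illustrative only:
    #
    #     m = matchmod.match(repo.root, '', ['path:foo.txt'])
    #     node = repo.commit(text="update foo",
    #                        user="someone <someone@example.com>", match=m)
    #     if node is None:
    #         pass # nothing changed, so nothing was committed
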
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

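    # Editor's note (not part of the original file): the hook sequence implied
    # by commit() and commitctx() above, summarized for reference. Each step
    # is visible in the code: "precommit" and "pretxncommit" are invoked with
    # throw=True, so a failing hook aborts, while "commit" runs after the
    # wlock is released, via _afterlock():
    #
    #     precommit    -> before commitctx() writes anything
    #     pretxncommit -> inside the transaction, changelog write pending;
    #                     an abort here rolls the transaction back
    #     commit       -> after the new changeset has fully landed
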
    @unfilteredmethod
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerate it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache, do
        # it. Otherwise, since nodes were destroyed, the cache is stale and
        # this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

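    # Editor's note (not part of the original file): a sketch of consuming
    # the 7-tuple returned by status() above; the unpacking order matches
    # the assignment of `r` in the method body, and `repo` is assumed to be
    # a localrepository instance:
    #
    #     (modified, added, removed, deleted,
    #      unknown, ignored, clean) = repo.status(ignored=True, clean=True,
    #                                             unknown=True)
    #     for f in modified:
    #         repo.ui.write("M %s\n" % f)
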
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

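    # Editor's note (not part of the original file): between() walks first
    # parents from each `top` towards `bottom` and records nodes at
    # exponentially growing distances (i == 1, 2, 4, 8, ...), since `f`
    # doubles each time a node is appended. For a linear chain with
    # top = n10 and bottom = n0, the list returned for that pair would be
    # [n9, n8, n6, n2]: a skip list that lets the old discovery protocol
    # bisect history in O(log n) round trips.
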
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

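    # Editor's note (not part of the original file): as the code above shows,
    # pull() ends by advancing phase boundaries: against a publishing (or
    # pre-phases) server everything pulled becomes public, while against a
    # non-publishing server the remote's own phase data decides what stays
    # draft. A hedged caller-side sketch, with an illustrative URL:
    #
    #     other = hg.peer(repo.ui, {}, 'http://example.com/repo')
    #     result = repo.pull(other)
    #     # result is 0 when no changes were found (see the no-fetch branch);
    #     # otherwise it is whatever addchangegroup() returned.
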
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        unfi = self.unfiltered()
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if unfi.obsstore:
                            # these messages are kept short for the 80-char
                            # line length limit
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            # If we are to push and there is at least one
                            # obsolete or unstable changeset in missing, at
                            # least one of the missing heads will be obsolete
                            # or unstable. So checking heads only is ok.
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(msb % ctx)
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed; synchronize on all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on remote but
                    # public here.
                    # XXX Beware that revsets break if droots is not strictly
                    # XXX made of roots; we may want to ensure it is, but that
                    # XXX is costly
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in unfi._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in unfi:
                    cr = unfi[nr]
                    cl = unfi[nl]
                    if bookmarks.validdest(unfi, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

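    # Editor's note (not part of the original file): a sketch of interpreting
    # push()'s return value, following the docstring above:
    #
    #     ret = repo.push(remote)
    #     if ret is None:
    #         pass # nothing to push
    #     elif ret == 0:
    #         pass # HTTP error reported by the remote
    #     else:
    #         pass # pushed; see addchangegroup() for the other values
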
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

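    # Editor's note (not part of the original file): getbundle() above is the
    # wire-protocol-friendly sibling of changegroupsubset(): instead of
    # (bases, heads) it takes (common, heads) and bundles exactly the
    # changesets in "::heads - ::common". With common=[nullid] it therefore
    # bundles every ancestor of the given heads.
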
    @unfilteredmethod
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

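        # Note (added commentary): prune() works because every revlog entry
        # records a linkrev pointing back at the changelog revision that
        # introduced it; a manifest node or filenode whose linkrev falls in
        # commonrevs is already on the receiving side and can be dropped.
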
        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

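        # Added commentary: lookup() is the single callback handed to the
        # bundler and dispatches on which revlog is being walked --
        # changelog entries record which manifests and files are needed,
        # manifest entries record which filenodes are needed, and file
        # entries resolve to the changeset that introduced them.
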
        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

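    # Illustrative note (added commentary): the 'bundle.reorder' knob read
    # above accepts 'auto' (leave reordering to the revlog layer) or any
    # boolean string understood by util.parsebool, e.g. in an hgrc:
    #
    #   [bundle]
    #   reorder = false
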
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    @unfilteredmethod
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

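        # Added commentary: gennodelst() is the fast-path counterpart of
        # prune() in _changegroupsubset -- instead of filtering a candidate
        # list, it scans the whole revlog and keeps every node whose
        # linkrev falls inside the outgoing changeset set.
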
        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

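    # Illustrative sketch (added commentary, not part of the original
    # source): both changegroup generators return an unbundler wrapping a
    # chunkbuffer, so a caller streams the result like a file; send() is a
    # hypothetical transport function:
    #
    #   cg = repo.changegroup([nullid], 'serve')  # everything vs. nullid
    #   while True:
    #       data = cg.read(32768)
    #       if not data:
    #           break
    #       send(data)
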
    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
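        # Worked example (added commentary): a pull that brings in
        # changesets creating two additional heads returns 1 + 2 = 3; one
        # that merges away a head returns -1 - 1 = -2; a no-op returns 0.
        # The value is computed from dh at the bottom of this method.
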
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

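            # Illustrative note (added commentary): the optional validation
            # pass above is enabled server-side via hgrc, e.g.:
            #
            #   [server]
            #   validate = True
            #
            # It records every filenode the incoming manifests promise, so
            # the file loop below can verify the bundle actually delivered
            # them.
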
            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            obsolete.clearobscaches(self)

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

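            # Illustrative note (added commentary): 'phases.publish'
            # defaults to True, so a plain push to this repo marks the
            # pushed changesets public; a non-publishing server is
            # configured with:
            #
            #   [phases]
            #   publish = False
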
            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

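    # Illustrative note (added commentary): deferring runhooks() via
    # _afterlock means the 'changegroup' and 'incoming' hooks fire only
    # after the repository lock is released, so an hgrc hook such as:
    #
    #   [hooks]
    #   incoming = hg log -r $HG_NODE
    #
    # sees the fully committed transaction.
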
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                self.branchcache = rbranchmap
                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    self._writebranchcache(self.branchcache,
                                           self[rtiprev].node(), rtiprev)
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

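    # Illustrative walk-through of the negotiation above (added
    # commentary): a server advertising 'streamreqs=revlogv1,generaldelta'
    # can be streamed from only if both requirements appear in
    # self.supportedformats; otherwise the clone silently falls back to a
    # regular pull.
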
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

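    # Illustrative sketch (added commentary, not part of the original
    # source): pushkey is the generic key/value transport behind features
    # such as bookmarks and phases, e.g. moving a bookmark by
    # compare-and-swap on its old node; the node variables are hypothetical:
    #
    #   ok = repo.pushkey('bookmarks', 'stable', hex(oldnode), hex(newnode))
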
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

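# Note (added commentary): savecommitmessage() above writes the text to
# .hg/last-message.txt (self.opener is rooted at .hg/), which is how the
# message of an aborted commit can be recovered.
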
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

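# Example (added commentary): undoname() maps a transaction journal file
# to its post-transaction undo name, replacing only the first occurrence:
#
#   undoname('.hg/store/journal')            -> '.hg/store/undo'
#   undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
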
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True