##// END OF EJS Templates
localrepo: strip now incrementally updates the branchheads cache...
Joshua Redstone -
r16716:0311a6ab default
parent child Browse files
Show More
@@ -1,2355 +1,2401 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # store files live under .hg/store, so resolve via sjoin
        # rather than the plain .hg join
        return obj.sjoin(fname)
26
26
class localrepository(repo.repository):
    # wire-protocol/peer capabilities advertised by this repository
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    # requirements that affect how revlog data is written
    supportedformats = set(('revlogv1', 'generaldelta'))
    # all requirements this version of the code can open
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
33
33
    def __init__(self, baseui, path=None, create=False):
        """Open the repository at `path`, or initialize one if `create`.

        `baseui` is copied so per-repo configuration (.hg/hgrc) does not
        leak back into the caller's ui.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)    # rooted at .hg
        self.wopener = scmutil.opener(self.root)   # rooted at working dir
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no readable .hg/hgrc: proceed with the base configuration
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    # fncache requires store; dotencode requires fncache
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # no requires file: treat as a legacy repository
                requirements = set()

        # a shared repository stores its data under another repo's .hg;
        # honor .hg/sharedpath when present
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        # branch-heads cache state; see updatebranchcache()
        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}
121
121
122 def _applyrequirements(self, requirements):
122 def _applyrequirements(self, requirements):
123 self.requirements = requirements
123 self.requirements = requirements
124 openerreqs = set(('revlogv1', 'generaldelta'))
124 openerreqs = set(('revlogv1', 'generaldelta'))
125 self.sopener.options = dict((r, 1) for r in requirements
125 self.sopener.options = dict((r, 1) for r in requirements
126 if r in openerreqs)
126 if r in openerreqs)
127
127
128 def _writerequirements(self):
128 def _writerequirements(self):
129 reqfile = self.opener("requires", "w")
129 reqfile = self.opener("requires", "w")
130 for r in self.requirements:
130 for r in self.requirements:
131 reqfile.write("%s\n" % r)
131 reqfile.write("%s\n" % r)
132 reqfile.close()
132 reqfile.close()
133
133
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        # NOTE(review): a plain startswith prefix test can also match a
        # sibling directory sharing the root as a string prefix
        # (e.g. /repo2 vs /repo) -- confirm callers always pass paths
        # inside self.root
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        # normalize to forward slashes for comparison with substate keys
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        # walk prefixes from longest to shortest looking for a subrepo
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # path is the subrepo itself
                    return True
                else:
                    # path is inside a subrepo; delegate the remainder
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
171
171
    @filecache('bookmarks')
    def _bookmarks(self):
        # bookmark name -> node, reloaded when .hg/bookmarks changes
        return bookmarks.read(self)
175
175
    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # the active bookmark, reloaded when .hg/bookmarks.current changes
        return bookmarks.readcurrent(self)
179
179
    def _writebookmarks(self, marks):
        # NOTE(review): `marks` is not used here; bookmarks.write reads
        # the repository's own state -- confirm before relying on it
        bookmarks.write(self)
182
182
183 def bookmarkheads(self, bookmark):
183 def bookmarkheads(self, bookmark):
184 name = bookmark.split('@', 1)[0]
184 name = bookmark.split('@', 1)[0]
185 heads = []
185 heads = []
186 for mark, n in self._bookmarks.iteritems():
186 for mark, n in self._bookmarks.iteritems():
187 if mark.split('@', 1)[0] == name:
187 if mark.split('@', 1)[0] == name:
188 heads.append(n)
188 heads.append(n)
189 return heads
189 return heads
190
190
    @storecache('phaseroots')
    def _phasecache(self):
        # phase data, reloaded when the store's phaseroots file changes
        return phases.phasecache(self, self._phasedefaults)
194
194
195 @storecache('00changelog.i')
195 @storecache('00changelog.i')
196 def changelog(self):
196 def changelog(self):
197 c = changelog.changelog(self.sopener)
197 c = changelog.changelog(self.sopener)
198 if 'HG_PENDING' in os.environ:
198 if 'HG_PENDING' in os.environ:
199 p = os.environ['HG_PENDING']
199 p = os.environ['HG_PENDING']
200 if p.startswith(self.root):
200 if p.startswith(self.root):
201 c.readpending('00changelog.i.a')
201 c.readpending('00changelog.i.a')
202 return c
202 return c
203
203
    @storecache('00manifest.i')
    def manifest(self):
        # the manifest revlog, reloaded when 00manifest.i changes
        return manifest.manifest(self.sopener)
207
207
    @filecache('dirstate')
    def dirstate(self):
        # one-element list so the nested function can mutate the flag
        # (Python 2 has no `nonlocal`)
        warned = [0]
        def validate(node):
            # map unknown working-directory parents to nullid, warning
            # at most once
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        # `dirstate` below is the module (this property shadows it only
        # as a class attribute)
        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
223
223
224 def __getitem__(self, changeid):
224 def __getitem__(self, changeid):
225 if changeid is None:
225 if changeid is None:
226 return context.workingctx(self)
226 return context.workingctx(self)
227 return context.changectx(self, changeid)
227 return context.changectx(self, changeid)
228
228
229 def __contains__(self, changeid):
229 def __contains__(self, changeid):
230 try:
230 try:
231 return bool(self.lookup(changeid))
231 return bool(self.lookup(changeid))
232 except error.RepoLookupError:
232 except error.RepoLookupError:
233 return False
233 return False
234
234
    def __nonzero__(self):
        # a repository object is always truthy, even when it has no
        # revisions (don't fall back to __len__)
        return True
237
237
    def __len__(self):
        # number of revisions in the repository
        return len(self.changelog)
240
240
241 def __iter__(self):
241 def __iter__(self):
242 for i in xrange(len(self)):
242 for i in xrange(len(self)):
243 yield i
243 yield i
244
244
245 def revs(self, expr, *args):
245 def revs(self, expr, *args):
246 '''Return a list of revisions matching the given revset'''
246 '''Return a list of revisions matching the given revset'''
247 expr = revset.formatspec(expr, *args)
247 expr = revset.formatspec(expr, *args)
248 m = revset.match(None, expr)
248 m = revset.match(None, expr)
249 return [r for r in m(self, range(len(self)))]
249 return [r for r in m(self, range(len(self)))]
250
250
251 def set(self, expr, *args):
251 def set(self, expr, *args):
252 '''
252 '''
253 Yield a context for each matching revision, after doing arg
253 Yield a context for each matching revision, after doing arg
254 replacement via revset.formatspec
254 replacement via revset.formatspec
255 '''
255 '''
256 for r in self.revs(expr, *args):
256 for r in self.revs(expr, *args):
257 yield self[r]
257 yield self[r]
258
258
    def url(self):
        # local repositories are addressed with the 'file:' scheme
        return 'file:' + self.root
261
261
    def hook(self, name, throw=False, **args):
        # delegate to the hook machinery using this repo's ui; with
        # throw=True a failing hook raises instead of returning a status
        return hook.hook(self.ui, self, name, throw, **args)
264
264
265 tag_disallowed = ':\r\n'
265 tag_disallowed = ':\r\n'
266
266
267 def _tag(self, names, node, message, local, user, date, extra={}):
267 def _tag(self, names, node, message, local, user, date, extra={}):
268 if isinstance(names, str):
268 if isinstance(names, str):
269 allchars = names
269 allchars = names
270 names = (names,)
270 names = (names,)
271 else:
271 else:
272 allchars = ''.join(names)
272 allchars = ''.join(names)
273 for c in self.tag_disallowed:
273 for c in self.tag_disallowed:
274 if c in allchars:
274 if c in allchars:
275 raise util.Abort(_('%r cannot be used in a tag name') % c)
275 raise util.Abort(_('%r cannot be used in a tag name') % c)
276
276
277 branches = self.branchmap()
277 branches = self.branchmap()
278 for name in names:
278 for name in names:
279 self.hook('pretag', throw=True, node=hex(node), tag=name,
279 self.hook('pretag', throw=True, node=hex(node), tag=name,
280 local=local)
280 local=local)
281 if name in branches:
281 if name in branches:
282 self.ui.warn(_("warning: tag %s conflicts with existing"
282 self.ui.warn(_("warning: tag %s conflicts with existing"
283 " branch name\n") % name)
283 " branch name\n") % name)
284
284
285 def writetags(fp, names, munge, prevtags):
285 def writetags(fp, names, munge, prevtags):
286 fp.seek(0, 2)
286 fp.seek(0, 2)
287 if prevtags and prevtags[-1] != '\n':
287 if prevtags and prevtags[-1] != '\n':
288 fp.write('\n')
288 fp.write('\n')
289 for name in names:
289 for name in names:
290 m = munge and munge(name) or name
290 m = munge and munge(name) or name
291 if (self._tagscache.tagtypes and
291 if (self._tagscache.tagtypes and
292 name in self._tagscache.tagtypes):
292 name in self._tagscache.tagtypes):
293 old = self.tags().get(name, nullid)
293 old = self.tags().get(name, nullid)
294 fp.write('%s %s\n' % (hex(old), m))
294 fp.write('%s %s\n' % (hex(old), m))
295 fp.write('%s %s\n' % (hex(node), m))
295 fp.write('%s %s\n' % (hex(node), m))
296 fp.close()
296 fp.close()
297
297
298 prevtags = ''
298 prevtags = ''
299 if local:
299 if local:
300 try:
300 try:
301 fp = self.opener('localtags', 'r+')
301 fp = self.opener('localtags', 'r+')
302 except IOError:
302 except IOError:
303 fp = self.opener('localtags', 'a')
303 fp = self.opener('localtags', 'a')
304 else:
304 else:
305 prevtags = fp.read()
305 prevtags = fp.read()
306
306
307 # local tags are stored in the current charset
307 # local tags are stored in the current charset
308 writetags(fp, names, None, prevtags)
308 writetags(fp, names, None, prevtags)
309 for name in names:
309 for name in names:
310 self.hook('tag', node=hex(node), tag=name, local=local)
310 self.hook('tag', node=hex(node), tag=name, local=local)
311 return
311 return
312
312
313 try:
313 try:
314 fp = self.wfile('.hgtags', 'rb+')
314 fp = self.wfile('.hgtags', 'rb+')
315 except IOError, e:
315 except IOError, e:
316 if e.errno != errno.ENOENT:
316 if e.errno != errno.ENOENT:
317 raise
317 raise
318 fp = self.wfile('.hgtags', 'ab')
318 fp = self.wfile('.hgtags', 'ab')
319 else:
319 else:
320 prevtags = fp.read()
320 prevtags = fp.read()
321
321
322 # committed tags are stored in UTF-8
322 # committed tags are stored in UTF-8
323 writetags(fp, names, encoding.fromlocal, prevtags)
323 writetags(fp, names, encoding.fromlocal, prevtags)
324
324
325 fp.close()
325 fp.close()
326
326
327 self.invalidatecaches()
327 self.invalidatecaches()
328
328
329 if '.hgtags' not in self.dirstate:
329 if '.hgtags' not in self.dirstate:
330 self[None].add(['.hgtags'])
330 self[None].add(['.hgtags'])
331
331
332 m = matchmod.exact(self.root, '', ['.hgtags'])
332 m = matchmod.exact(self.root, '', ['.hgtags'])
333 tagnode = self.commit(message, user, date, extra=extra, match=m)
333 tagnode = self.commit(message, user, date, extra=extra, match=m)
334
334
335 for name in names:
335 for name in names:
336 self.hook('tag', node=hex(node), tag=name, local=local)
336 self.hook('tag', node=hex(node), tag=name, local=local)
337
337
338 return tagnode
338 return tagnode
339
339
340 def tag(self, names, node, message, local, user, date):
340 def tag(self, names, node, message, local, user, date):
341 '''tag a revision with one or more symbolic names.
341 '''tag a revision with one or more symbolic names.
342
342
343 names is a list of strings or, when adding a single tag, names may be a
343 names is a list of strings or, when adding a single tag, names may be a
344 string.
344 string.
345
345
346 if local is True, the tags are stored in a per-repository file.
346 if local is True, the tags are stored in a per-repository file.
347 otherwise, they are stored in the .hgtags file, and a new
347 otherwise, they are stored in the .hgtags file, and a new
348 changeset is committed with the change.
348 changeset is committed with the change.
349
349
350 keyword arguments:
350 keyword arguments:
351
351
352 local: whether to store tags in non-version-controlled file
352 local: whether to store tags in non-version-controlled file
353 (default False)
353 (default False)
354
354
355 message: commit message to use if committing
355 message: commit message to use if committing
356
356
357 user: name of user to use if committing
357 user: name of user to use if committing
358
358
359 date: date tuple to use if committing'''
359 date: date tuple to use if committing'''
360
360
361 if not local:
361 if not local:
362 for x in self.status()[:5]:
362 for x in self.status()[:5]:
363 if '.hgtags' in x:
363 if '.hgtags' in x:
364 raise util.Abort(_('working copy of .hgtags is changed '
364 raise util.Abort(_('working copy of .hgtags is changed '
365 '(please commit .hgtags manually)'))
365 '(please commit .hgtags manually)'))
366
366
367 self.tags() # instantiate the cache
367 self.tags() # instantiate the cache
368 self._tag(names, node, message, local, user, date)
368 self._tag(names, node, message, local, user, date)
369
369
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # filled lazily by tagslist() and nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
392
392
393 def tags(self):
393 def tags(self):
394 '''return a mapping of tag to node'''
394 '''return a mapping of tag to node'''
395 t = {}
395 t = {}
396 for k, v in self._tagscache.tags.iteritems():
396 for k, v in self._tagscache.tags.iteritems():
397 try:
397 try:
398 # ignore tags to unknown nodes
398 # ignore tags to unknown nodes
399 self.changelog.rev(v)
399 self.changelog.rev(v)
400 t[k] = v
400 t[k] = v
401 except (error.LookupError, ValueError):
401 except (error.LookupError, ValueError):
402 pass
402 pass
403 return t
403 return t
404
404
405 def _findtags(self):
405 def _findtags(self):
406 '''Do the hard work of finding tags. Return a pair of dicts
406 '''Do the hard work of finding tags. Return a pair of dicts
407 (tags, tagtypes) where tags maps tag name to node, and tagtypes
407 (tags, tagtypes) where tags maps tag name to node, and tagtypes
408 maps tag name to a string like \'global\' or \'local\'.
408 maps tag name to a string like \'global\' or \'local\'.
409 Subclasses or extensions are free to add their own tags, but
409 Subclasses or extensions are free to add their own tags, but
410 should be aware that the returned dicts will be retained for the
410 should be aware that the returned dicts will be retained for the
411 duration of the localrepo object.'''
411 duration of the localrepo object.'''
412
412
413 # XXX what tagtype should subclasses/extensions use? Currently
413 # XXX what tagtype should subclasses/extensions use? Currently
414 # mq and bookmarks add tags, but do not set the tagtype at all.
414 # mq and bookmarks add tags, but do not set the tagtype at all.
415 # Should each extension invent its own tag type? Should there
415 # Should each extension invent its own tag type? Should there
416 # be one tagtype for all such "virtual" tags? Or is the status
416 # be one tagtype for all such "virtual" tags? Or is the status
417 # quo fine?
417 # quo fine?
418
418
419 alltags = {} # map tag name to (node, hist)
419 alltags = {} # map tag name to (node, hist)
420 tagtypes = {}
420 tagtypes = {}
421
421
422 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
422 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
423 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
423 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
424
424
425 # Build the return dicts. Have to re-encode tag names because
425 # Build the return dicts. Have to re-encode tag names because
426 # the tags module always uses UTF-8 (in order not to lose info
426 # the tags module always uses UTF-8 (in order not to lose info
427 # writing to the cache), but the rest of Mercurial wants them in
427 # writing to the cache), but the rest of Mercurial wants them in
428 # local encoding.
428 # local encoding.
429 tags = {}
429 tags = {}
430 for (name, (node, hist)) in alltags.iteritems():
430 for (name, (node, hist)) in alltags.iteritems():
431 if node != nullid:
431 if node != nullid:
432 tags[encoding.tolocal(name)] = node
432 tags[encoding.tolocal(name)] = node
433 tags['tip'] = self.changelog.tip()
433 tags['tip'] = self.changelog.tip()
434 tagtypes = dict([(encoding.tolocal(name), value)
434 tagtypes = dict([(encoding.tolocal(name), value)
435 for (name, value) in tagtypes.iteritems()])
435 for (name, value) in tagtypes.iteritems()])
436 return (tags, tagtypes)
436 return (tags, tagtypes)
437
437
438 def tagtype(self, tagname):
438 def tagtype(self, tagname):
439 '''
439 '''
440 return the type of the given tag. result can be:
440 return the type of the given tag. result can be:
441
441
442 'local' : a local tag
442 'local' : a local tag
443 'global' : a global tag
443 'global' : a global tag
444 None : tag does not exist
444 None : tag does not exist
445 '''
445 '''
446
446
447 return self._tagscache.tagtypes.get(tagname)
447 return self._tagscache.tagtypes.get(tagname)
448
448
449 def tagslist(self):
449 def tagslist(self):
450 '''return a list of tags ordered by revision'''
450 '''return a list of tags ordered by revision'''
451 if not self._tagscache.tagslist:
451 if not self._tagscache.tagslist:
452 l = []
452 l = []
453 for t, n in self.tags().iteritems():
453 for t, n in self.tags().iteritems():
454 r = self.changelog.rev(n)
454 r = self.changelog.rev(n)
455 l.append((r, t, n))
455 l.append((r, t, n))
456 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
456 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
457
457
458 return self._tagscache.tagslist
458 return self._tagscache.tagslist
459
459
460 def nodetags(self, node):
460 def nodetags(self, node):
461 '''return the tags associated with a node'''
461 '''return the tags associated with a node'''
462 if not self._tagscache.nodetagscache:
462 if not self._tagscache.nodetagscache:
463 nodetagscache = {}
463 nodetagscache = {}
464 for t, n in self._tagscache.tags.iteritems():
464 for t, n in self._tagscache.tags.iteritems():
465 nodetagscache.setdefault(n, []).append(t)
465 nodetagscache.setdefault(n, []).append(t)
466 for tags in nodetagscache.itervalues():
466 for tags in nodetagscache.itervalues():
467 tags.sort()
467 tags.sort()
468 self._tagscache.nodetagscache = nodetagscache
468 self._tagscache.nodetagscache = nodetagscache
469 return self._tagscache.nodetagscache.get(node, [])
469 return self._tagscache.nodetagscache.get(node, [])
470
470
471 def nodebookmarks(self, node):
471 def nodebookmarks(self, node):
472 marks = []
472 marks = []
473 for bookmark, n in self._bookmarks.iteritems():
473 for bookmark, n in self._bookmarks.iteritems():
474 if n == node:
474 if n == node:
475 marks.append(bookmark)
475 marks.append(bookmark)
476 return sorted(marks)
476 return sorted(marks)
477
477
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # Bring `partial` (a branch -> heads map valid up to revision
        # `lrev`) up to date with the current tip, then write the
        # result to the on-disk cache.
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
487
487
    def updatebranchcache(self):
        # Refresh the in-memory branch heads cache (self._branchcache).
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # cache already matches the current tip: nothing to do
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory state (e.g. the previously cached tip
            # was stripped): rebuild from the on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            # incremental update from the previously cached tip
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial
504
504
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        # make sure the cache reflects the current tip before exposing it
        self.updatebranchcache()
        return self._branchcache
509
509
510 def branchtags(self):
510 def branchtags(self):
511 '''return a dict where branch names map to the tipmost head of
511 '''return a dict where branch names map to the tipmost head of
512 the branch, open heads come before closed'''
512 the branch, open heads come before closed'''
513 bt = {}
513 bt = {}
514 for bn, heads in self.branchmap().iteritems():
514 for bn, heads in self.branchmap().iteritems():
515 tip = heads[-1]
515 tip = heads[-1]
516 for h in reversed(heads):
516 for h in reversed(heads):
517 if 'close' not in self.changelog.read(h)[5]:
517 if 'close' not in self.changelog.read(h)[5]:
518 tip = h
518 tip = h
519 break
519 break
520 bt[bn] = tip
520 bt[bn] = tip
521 return bt
521 return bt
522
522
    def _readbranchcache(self):
        """Read the branch head cache from .hg/cache/branchheads.

        Returns (partial, last, lrev): partial maps branch name to a list
        of head nodes; last/lrev are the node and rev of the tip the cache
        was valid for.  Any inconsistency (tip mismatch, unknown node)
        discards the cache and returns ({}, nullid, nullrev).
        """
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            # no cache file (or unreadable): caller recomputes from scratch
            return {}, nullid, nullrev

        try:
            # first line is "<tip hex> <tip rev>"; remaining lines are
            # "<head hex> <branch name>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    # a cached head was stripped from the repo
                    raise ValueError('invalidating branch cache because node '+
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is not fatal: fall back to a full rebuild
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
551
554
552 def _writebranchcache(self, branches, tip, tiprev):
555 def _writebranchcache(self, branches, tip, tiprev):
553 try:
556 try:
554 f = self.opener("cache/branchheads", "w", atomictemp=True)
557 f = self.opener("cache/branchheads", "w", atomictemp=True)
555 f.write("%s %s\n" % (hex(tip), tiprev))
558 f.write("%s %s\n" % (hex(tip), tiprev))
556 for label, nodes in branches.iteritems():
559 for label, nodes in branches.iteritems():
557 for node in nodes:
560 for node in nodes:
558 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
561 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
559 f.close()
562 f.close()
560 except (IOError, OSError):
563 except (IOError, OSError):
561 pass
564 pass
562
565
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            # Remove duplicates - nodes that are in newnodes and are already in
            # bheads.  This can happen if you strip a node and its parent was
            # already a head (because they're on different branches).
            bheads = set(bheads)

            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened).
            # avoid using 'bhead in self' here because that dives down into
            # branchcache code somewhat recursively.
            bheads = [bhead for bhead in bheads \
                      if self.changelog.hasnode(bhead)]
            if len(bheads) > 1:
                bheads = sorted(bheads, key=lambda x: self[x].rev())
                # starting from tip means fewer passes over reachable
                while newnodes:
                    latest = newnodes.pop()
                    if latest not in bheads:
                        continue
                    minbhnode = self[bheads[0]].node()
                    reachable = self.changelog.reachable(latest, minbhnode)
                    reachable.remove(latest)
                    if reachable:
                        # drop older heads reachable from a newer one
                        bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped.  This code filters them out.  Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch] \
                     if self.changelog.hasnode(head)]
            if len(nodes) < 1:
                del partial[branch]
616
    def lookup(self, key):
        """Resolve *key* to the binary node of the changeset it names."""
        return self[key].node()
591
619
    def lookupbranch(self, key, remote=None):
        """Return the branch name that *key* refers to.

        If key names an existing branch (in remote if given, else here) it
        is returned as-is; otherwise key is resolved to a changeset and
        that changeset's branch name is returned."""
        repo = remote or self
        if key in repo.branchmap():
            return key

        # resolve the changeset locally unless the remote is itself local
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()
599
627
600 def known(self, nodes):
628 def known(self, nodes):
601 nm = self.changelog.nodemap
629 nm = self.changelog.nodemap
602 pc = self._phasecache
630 pc = self._phasecache
603 result = []
631 result = []
604 for n in nodes:
632 for n in nodes:
605 r = nm.get(n)
633 r = nm.get(n)
606 resp = not (r is None or pc.phase(self, r) >= phases.secret)
634 resp = not (r is None or pc.phase(self, r) >= phases.secret)
607 result.append(resp)
635 result.append(resp)
608 return result
636 return result
609
637
    def local(self):
        """Return self; used as a truth test for "is this repo local"
        (see lookupbranch)."""
        return self
612
640
    def join(self, f):
        """Join *f* onto self.path (the repository metadata directory)."""
        return os.path.join(self.path, f)
615
643
    def wjoin(self, f):
        """Join *f* onto self.root (the working directory root)."""
        return os.path.join(self.root, f)
618
646
619 def file(self, f):
647 def file(self, f):
620 if f[0] == '/':
648 if f[0] == '/':
621 f = f[1:]
649 f = f[1:]
622 return filelog.filelog(self.sopener, f)
650 return filelog.filelog(self.sopener, f)
623
651
    def changectx(self, changeid):
        """Return the changectx for *changeid* (same as self[changeid])."""
        return self[changeid]
626
654
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        # changeid None selects the working directory context (cf. the
        # self[None] usage in _filecommit)
        return self[changeid].parents()
630
658
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents, fixing up dirstate copy
        records that need the first parent's manifest to resolve."""
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
641
669
    def filectx(self, path, changeid=None, fileid=None):
        """Return a filectx for *path*.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
646
674
    def getcwd(self):
        """Return the current working directory as the dirstate sees it."""
        return self.dirstate.getcwd()
649
677
    def pathto(self, f, cwd=None):
        """Return repo file *f* as a path relative to cwd (delegates to
        the dirstate)."""
        return self.dirstate.pathto(f, cwd)
652
680
    def wfile(self, f, mode='r'):
        """Open working directory file *f* with *mode*."""
        return self.wopener(f, mode)
655
683
    def _link(self, f):
        """Return True if working directory file *f* is a symlink."""
        return os.path.islink(self.wjoin(f))
658
686
659 def _loadfilter(self, filter):
687 def _loadfilter(self, filter):
660 if filter not in self.filterpats:
688 if filter not in self.filterpats:
661 l = []
689 l = []
662 for pat, cmd in self.ui.configitems(filter):
690 for pat, cmd in self.ui.configitems(filter):
663 if cmd == '!':
691 if cmd == '!':
664 continue
692 continue
665 mf = matchmod.match(self.root, '', [pat])
693 mf = matchmod.match(self.root, '', [pat])
666 fn = None
694 fn = None
667 params = cmd
695 params = cmd
668 for name, filterfn in self._datafilters.iteritems():
696 for name, filterfn in self._datafilters.iteritems():
669 if cmd.startswith(name):
697 if cmd.startswith(name):
670 fn = filterfn
698 fn = filterfn
671 params = cmd[len(name):].lstrip()
699 params = cmd[len(name):].lstrip()
672 break
700 break
673 if not fn:
701 if not fn:
674 fn = lambda s, c, **kwargs: util.filter(s, c)
702 fn = lambda s, c, **kwargs: util.filter(s, c)
675 # Wrap old filters not supporting keyword arguments
703 # Wrap old filters not supporting keyword arguments
676 if not inspect.getargspec(fn)[2]:
704 if not inspect.getargspec(fn)[2]:
677 oldfn = fn
705 oldfn = fn
678 fn = lambda s, c, **kwargs: oldfn(s, c)
706 fn = lambda s, c, **kwargs: oldfn(s, c)
679 l.append((mf, fn, params))
707 l.append((mf, fn, params))
680 self.filterpats[filter] = l
708 self.filterpats[filter] = l
681 return self.filterpats[filter]
709 return self.filterpats[filter]
682
710
683 def _filter(self, filterpats, filename, data):
711 def _filter(self, filterpats, filename, data):
684 for mf, fn, cmd in filterpats:
712 for mf, fn, cmd in filterpats:
685 if mf(filename):
713 if mf(filename):
686 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
714 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
687 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
715 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
688 break
716 break
689
717
690 return data
718 return data
691
719
    @propertycache
    def _encodefilterpats(self):
        # lazily loaded [encode] filter patterns (cached after first use)
        return self._loadfilter('encode')
695
723
    @propertycache
    def _decodefilterpats(self):
        # lazily loaded [decode] filter patterns (cached after first use)
        return self._loadfilter('decode')
699
727
    def adddatafilter(self, name, filter):
        """Register a named data filter usable from filter configuration
        commands (see _loadfilter)."""
        self._datafilters[name] = filter
702
730
703 def wread(self, filename):
731 def wread(self, filename):
704 if self._link(filename):
732 if self._link(filename):
705 data = os.readlink(self.wjoin(filename))
733 data = os.readlink(self.wjoin(filename))
706 else:
734 else:
707 data = self.wopener.read(filename)
735 data = self.wopener.read(filename)
708 return self._filter(self._encodefilterpats, filename, data)
736 return self._filter(self._encodefilterpats, filename, data)
709
737
    def wwrite(self, filename, data, flags):
        """Write *data* to working directory file *filename*, applying
        decode filters and honouring the 'l' (symlink) and 'x' (exec)
        flags."""
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            # the data is the symlink target, not file content
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)
718
746
719 def wwritedata(self, filename, data):
747 def wwritedata(self, filename, data):
720 return self._filter(self._decodefilterpats, filename, data)
748 return self._filter(self._decodefilterpats, filename, data)
721
749
    def transaction(self, desc):
        """Open (or nest into) a store transaction described by *desc*.

        Returns the transaction object.  Raises RepoError when an
        abandoned journal from a crashed transaction is found."""
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # nest inside the already-running transaction
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # snapshot state needed for rollback before any writes happen
        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
741
769
    def _journalfiles(self):
        """Return the tuple of journal file paths a transaction writes
        (store journal plus dirstate/branch/desc/bookmarks/phaseroots
        snapshots)."""
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))
747
775
    def undofiles(self):
        """Return the 'undo.*' counterparts of the journal files."""
        return [undoname(x) for x in self._journalfiles()]
750
778
    def _writejournal(self, desc):
        """Snapshot rollback metadata (dirstate, branch, description,
        bookmarks, phaseroots) before a transaction starts."""
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        # first line: revision count; second line: transaction description
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
762
790
    def recover(self):
        """Roll back an interrupted transaction, if one is present.

        Returns True when a journal was found and rolled back, False
        otherwise."""
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
777
805
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction, if undo information exists.

        Returns 0 on success, 1 when there is nothing to roll back."""
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
790
818
    def _rollback(self, dryrun, force):
        """Perform the actual rollback of the last transaction.

        dryrun only reports what would happen; force skips the safety
        check that refuses to roll back a commit the working directory is
        not based on.  Returns 0."""
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # no undo.desc: a rollback is still possible, just undescribed
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # restore the dirstate only when rollback removed a wd parent
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
853
884
854 def invalidatecaches(self):
885 def invalidatecaches(self):
855 def delcache(name):
886 def delcache(name):
856 try:
887 try:
857 delattr(self, name)
888 delattr(self, name)
858 except AttributeError:
889 except AttributeError:
859 pass
890 pass
860
891
861 delcache('_tagscache')
892 delcache('_tagscache')
862
893
863 self._branchcache = None # in UTF-8
894 self._branchcache = None # in UTF-8
864 self._branchcachetip = None
895 self._branchcachetip = None
865
896
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it does not
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            # drop the cached per-file state so the filecache machinery
            # re-checks the files backing the dirstate on next access
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')
882
913
883 def invalidate(self):
914 def invalidate(self):
884 for k in self._filecache:
915 for k in self._filecache:
885 # dirstate is invalidated separately in invalidatedirstate()
916 # dirstate is invalidated separately in invalidatedirstate()
886 if k == 'dirstate':
917 if k == 'dirstate':
887 continue
918 continue
888
919
889 try:
920 try:
890 delattr(self, k)
921 delattr(self, k)
891 except AttributeError:
922 except AttributeError:
892 pass
923 pass
893 self.invalidatecaches()
924 self.invalidatecaches()
894
925
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname*.

        When the lock is held elsewhere: raise immediately if *wait* is
        false, otherwise retry with the configured ui.timeout.  Runs
        *acquirefn* once the lock is held and returns the lock object."""
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
909
940
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            # not locked: nothing to defer to, run immediately
            callback()
919
950
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # re-enter the lock we already hold
            l.lock()
            return l

        def unlock():
            # flush store-level state before the lock is dropped
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
942
973
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-enter the lock we already hold
            l.lock()
            return l

        def unlock():
            # persist the dirstate before the lock is dropped
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
963
994
964 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
995 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
965 """
996 """
966 commit an individual file as part of a larger transaction
997 commit an individual file as part of a larger transaction
967 """
998 """
968
999
969 fname = fctx.path()
1000 fname = fctx.path()
970 text = fctx.data()
1001 text = fctx.data()
971 flog = self.file(fname)
1002 flog = self.file(fname)
972 fparent1 = manifest1.get(fname, nullid)
1003 fparent1 = manifest1.get(fname, nullid)
973 fparent2 = fparent2o = manifest2.get(fname, nullid)
1004 fparent2 = fparent2o = manifest2.get(fname, nullid)
974
1005
975 meta = {}
1006 meta = {}
976 copy = fctx.renamed()
1007 copy = fctx.renamed()
977 if copy and copy[0] != fname:
1008 if copy and copy[0] != fname:
978 # Mark the new revision of this file as a copy of another
1009 # Mark the new revision of this file as a copy of another
979 # file. This copy data will effectively act as a parent
1010 # file. This copy data will effectively act as a parent
980 # of this new revision. If this is a merge, the first
1011 # of this new revision. If this is a merge, the first
981 # parent will be the nullid (meaning "look up the copy data")
1012 # parent will be the nullid (meaning "look up the copy data")
982 # and the second one will be the other parent. For example:
1013 # and the second one will be the other parent. For example:
983 #
1014 #
984 # 0 --- 1 --- 3 rev1 changes file foo
1015 # 0 --- 1 --- 3 rev1 changes file foo
985 # \ / rev2 renames foo to bar and changes it
1016 # \ / rev2 renames foo to bar and changes it
986 # \- 2 -/ rev3 should have bar with all changes and
1017 # \- 2 -/ rev3 should have bar with all changes and
987 # should record that bar descends from
1018 # should record that bar descends from
988 # bar in rev2 and foo in rev1
1019 # bar in rev2 and foo in rev1
989 #
1020 #
990 # this allows this merge to succeed:
1021 # this allows this merge to succeed:
991 #
1022 #
992 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1023 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
993 # \ / merging rev3 and rev4 should use bar@rev2
1024 # \ / merging rev3 and rev4 should use bar@rev2
994 # \- 2 --- 4 as the merge base
1025 # \- 2 --- 4 as the merge base
995 #
1026 #
996
1027
997 cfname = copy[0]
1028 cfname = copy[0]
998 crev = manifest1.get(cfname)
1029 crev = manifest1.get(cfname)
999 newfparent = fparent2
1030 newfparent = fparent2
1000
1031
1001 if manifest2: # branch merge
1032 if manifest2: # branch merge
1002 if fparent2 == nullid or crev is None: # copied on remote side
1033 if fparent2 == nullid or crev is None: # copied on remote side
1003 if cfname in manifest2:
1034 if cfname in manifest2:
1004 crev = manifest2[cfname]
1035 crev = manifest2[cfname]
1005 newfparent = fparent1
1036 newfparent = fparent1
1006
1037
1007 # find source in nearest ancestor if we've lost track
1038 # find source in nearest ancestor if we've lost track
1008 if not crev:
1039 if not crev:
1009 self.ui.debug(" %s: searching for copy revision for %s\n" %
1040 self.ui.debug(" %s: searching for copy revision for %s\n" %
1010 (fname, cfname))
1041 (fname, cfname))
1011 for ancestor in self[None].ancestors():
1042 for ancestor in self[None].ancestors():
1012 if cfname in ancestor:
1043 if cfname in ancestor:
1013 crev = ancestor[cfname].filenode()
1044 crev = ancestor[cfname].filenode()
1014 break
1045 break
1015
1046
1016 if crev:
1047 if crev:
1017 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1048 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1018 meta["copy"] = cfname
1049 meta["copy"] = cfname
1019 meta["copyrev"] = hex(crev)
1050 meta["copyrev"] = hex(crev)
1020 fparent1, fparent2 = nullid, newfparent
1051 fparent1, fparent2 = nullid, newfparent
1021 else:
1052 else:
1022 self.ui.warn(_("warning: can't find ancestor for '%s' "
1053 self.ui.warn(_("warning: can't find ancestor for '%s' "
1023 "copied from '%s'!\n") % (fname, cfname))
1054 "copied from '%s'!\n") % (fname, cfname))
1024
1055
1025 elif fparent2 != nullid:
1056 elif fparent2 != nullid:
1026 # is one parent an ancestor of the other?
1057 # is one parent an ancestor of the other?
1027 fparentancestor = flog.ancestor(fparent1, fparent2)
1058 fparentancestor = flog.ancestor(fparent1, fparent2)
1028 if fparentancestor == fparent1:
1059 if fparentancestor == fparent1:
1029 fparent1, fparent2 = fparent2, nullid
1060 fparent1, fparent2 = fparent2, nullid
1030 elif fparentancestor == fparent2:
1061 elif fparentancestor == fparent2:
1031 fparent2 = nullid
1062 fparent2 = nullid
1032
1063
1033 # is the file changed?
1064 # is the file changed?
1034 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1065 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1035 changelist.append(fname)
1066 changelist.append(fname)
1036 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1067 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1037
1068
1038 # are just the flags changed during merge?
1069 # are just the flags changed during merge?
1039 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1070 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1040 changelist.append(fname)
1071 changelist.append(fname)
1041
1072
1042 return fparent1
1073 return fparent1
1043
1074
1044 def commit(self, text="", user=None, date=None, match=None, force=False,
1075 def commit(self, text="", user=None, date=None, match=None, force=False,
1045 editor=False, extra={}):
1076 editor=False, extra={}):
1046 """Add a new revision to current repository.
1077 """Add a new revision to current repository.
1047
1078
1048 Revision information is gathered from the working directory,
1079 Revision information is gathered from the working directory,
1049 match can be used to filter the committed files. If editor is
1080 match can be used to filter the committed files. If editor is
1050 supplied, it is called to get a commit message.
1081 supplied, it is called to get a commit message.
1051 """
1082 """
1052
1083
1053 def fail(f, msg):
1084 def fail(f, msg):
1054 raise util.Abort('%s: %s' % (f, msg))
1085 raise util.Abort('%s: %s' % (f, msg))
1055
1086
1056 if not match:
1087 if not match:
1057 match = matchmod.always(self.root, '')
1088 match = matchmod.always(self.root, '')
1058
1089
1059 if not force:
1090 if not force:
1060 vdirs = []
1091 vdirs = []
1061 match.dir = vdirs.append
1092 match.dir = vdirs.append
1062 match.bad = fail
1093 match.bad = fail
1063
1094
1064 wlock = self.wlock()
1095 wlock = self.wlock()
1065 try:
1096 try:
1066 wctx = self[None]
1097 wctx = self[None]
1067 merge = len(wctx.parents()) > 1
1098 merge = len(wctx.parents()) > 1
1068
1099
1069 if (not force and merge and match and
1100 if (not force and merge and match and
1070 (match.files() or match.anypats())):
1101 (match.files() or match.anypats())):
1071 raise util.Abort(_('cannot partially commit a merge '
1102 raise util.Abort(_('cannot partially commit a merge '
1072 '(do not specify files or patterns)'))
1103 '(do not specify files or patterns)'))
1073
1104
1074 changes = self.status(match=match, clean=force)
1105 changes = self.status(match=match, clean=force)
1075 if force:
1106 if force:
1076 changes[0].extend(changes[6]) # mq may commit unchanged files
1107 changes[0].extend(changes[6]) # mq may commit unchanged files
1077
1108
1078 # check subrepos
1109 # check subrepos
1079 subs = []
1110 subs = []
1080 commitsubs = set()
1111 commitsubs = set()
1081 newstate = wctx.substate.copy()
1112 newstate = wctx.substate.copy()
1082 # only manage subrepos and .hgsubstate if .hgsub is present
1113 # only manage subrepos and .hgsubstate if .hgsub is present
1083 if '.hgsub' in wctx:
1114 if '.hgsub' in wctx:
1084 # we'll decide whether to track this ourselves, thanks
1115 # we'll decide whether to track this ourselves, thanks
1085 if '.hgsubstate' in changes[0]:
1116 if '.hgsubstate' in changes[0]:
1086 changes[0].remove('.hgsubstate')
1117 changes[0].remove('.hgsubstate')
1087 if '.hgsubstate' in changes[2]:
1118 if '.hgsubstate' in changes[2]:
1088 changes[2].remove('.hgsubstate')
1119 changes[2].remove('.hgsubstate')
1089
1120
1090 # compare current state to last committed state
1121 # compare current state to last committed state
1091 # build new substate based on last committed state
1122 # build new substate based on last committed state
1092 oldstate = wctx.p1().substate
1123 oldstate = wctx.p1().substate
1093 for s in sorted(newstate.keys()):
1124 for s in sorted(newstate.keys()):
1094 if not match(s):
1125 if not match(s):
1095 # ignore working copy, use old state if present
1126 # ignore working copy, use old state if present
1096 if s in oldstate:
1127 if s in oldstate:
1097 newstate[s] = oldstate[s]
1128 newstate[s] = oldstate[s]
1098 continue
1129 continue
1099 if not force:
1130 if not force:
1100 raise util.Abort(
1131 raise util.Abort(
1101 _("commit with new subrepo %s excluded") % s)
1132 _("commit with new subrepo %s excluded") % s)
1102 if wctx.sub(s).dirty(True):
1133 if wctx.sub(s).dirty(True):
1103 if not self.ui.configbool('ui', 'commitsubrepos'):
1134 if not self.ui.configbool('ui', 'commitsubrepos'):
1104 raise util.Abort(
1135 raise util.Abort(
1105 _("uncommitted changes in subrepo %s") % s,
1136 _("uncommitted changes in subrepo %s") % s,
1106 hint=_("use --subrepos for recursive commit"))
1137 hint=_("use --subrepos for recursive commit"))
1107 subs.append(s)
1138 subs.append(s)
1108 commitsubs.add(s)
1139 commitsubs.add(s)
1109 else:
1140 else:
1110 bs = wctx.sub(s).basestate()
1141 bs = wctx.sub(s).basestate()
1111 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1142 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1112 if oldstate.get(s, (None, None, None))[1] != bs:
1143 if oldstate.get(s, (None, None, None))[1] != bs:
1113 subs.append(s)
1144 subs.append(s)
1114
1145
1115 # check for removed subrepos
1146 # check for removed subrepos
1116 for p in wctx.parents():
1147 for p in wctx.parents():
1117 r = [s for s in p.substate if s not in newstate]
1148 r = [s for s in p.substate if s not in newstate]
1118 subs += [s for s in r if match(s)]
1149 subs += [s for s in r if match(s)]
1119 if subs:
1150 if subs:
1120 if (not match('.hgsub') and
1151 if (not match('.hgsub') and
1121 '.hgsub' in (wctx.modified() + wctx.added())):
1152 '.hgsub' in (wctx.modified() + wctx.added())):
1122 raise util.Abort(
1153 raise util.Abort(
1123 _("can't commit subrepos without .hgsub"))
1154 _("can't commit subrepos without .hgsub"))
1124 changes[0].insert(0, '.hgsubstate')
1155 changes[0].insert(0, '.hgsubstate')
1125
1156
1126 elif '.hgsub' in changes[2]:
1157 elif '.hgsub' in changes[2]:
1127 # clean up .hgsubstate when .hgsub is removed
1158 # clean up .hgsubstate when .hgsub is removed
1128 if ('.hgsubstate' in wctx and
1159 if ('.hgsubstate' in wctx and
1129 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1160 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1130 changes[2].insert(0, '.hgsubstate')
1161 changes[2].insert(0, '.hgsubstate')
1131
1162
1132 # make sure all explicit patterns are matched
1163 # make sure all explicit patterns are matched
1133 if not force and match.files():
1164 if not force and match.files():
1134 matched = set(changes[0] + changes[1] + changes[2])
1165 matched = set(changes[0] + changes[1] + changes[2])
1135
1166
1136 for f in match.files():
1167 for f in match.files():
1137 if f == '.' or f in matched or f in wctx.substate:
1168 if f == '.' or f in matched or f in wctx.substate:
1138 continue
1169 continue
1139 if f in changes[3]: # missing
1170 if f in changes[3]: # missing
1140 fail(f, _('file not found!'))
1171 fail(f, _('file not found!'))
1141 if f in vdirs: # visited directory
1172 if f in vdirs: # visited directory
1142 d = f + '/'
1173 d = f + '/'
1143 for mf in matched:
1174 for mf in matched:
1144 if mf.startswith(d):
1175 if mf.startswith(d):
1145 break
1176 break
1146 else:
1177 else:
1147 fail(f, _("no match under directory!"))
1178 fail(f, _("no match under directory!"))
1148 elif f not in self.dirstate:
1179 elif f not in self.dirstate:
1149 fail(f, _("file not tracked!"))
1180 fail(f, _("file not tracked!"))
1150
1181
1151 if (not force and not extra.get("close") and not merge
1182 if (not force and not extra.get("close") and not merge
1152 and not (changes[0] or changes[1] or changes[2])
1183 and not (changes[0] or changes[1] or changes[2])
1153 and wctx.branch() == wctx.p1().branch()):
1184 and wctx.branch() == wctx.p1().branch()):
1154 return None
1185 return None
1155
1186
1156 if merge and changes[3]:
1187 if merge and changes[3]:
1157 raise util.Abort(_("cannot commit merge with missing files"))
1188 raise util.Abort(_("cannot commit merge with missing files"))
1158
1189
1159 ms = mergemod.mergestate(self)
1190 ms = mergemod.mergestate(self)
1160 for f in changes[0]:
1191 for f in changes[0]:
1161 if f in ms and ms[f] == 'u':
1192 if f in ms and ms[f] == 'u':
1162 raise util.Abort(_("unresolved merge conflicts "
1193 raise util.Abort(_("unresolved merge conflicts "
1163 "(see hg help resolve)"))
1194 "(see hg help resolve)"))
1164
1195
1165 cctx = context.workingctx(self, text, user, date, extra, changes)
1196 cctx = context.workingctx(self, text, user, date, extra, changes)
1166 if editor:
1197 if editor:
1167 cctx._text = editor(self, cctx, subs)
1198 cctx._text = editor(self, cctx, subs)
1168 edited = (text != cctx._text)
1199 edited = (text != cctx._text)
1169
1200
1170 # commit subs and write new state
1201 # commit subs and write new state
1171 if subs:
1202 if subs:
1172 for s in sorted(commitsubs):
1203 for s in sorted(commitsubs):
1173 sub = wctx.sub(s)
1204 sub = wctx.sub(s)
1174 self.ui.status(_('committing subrepository %s\n') %
1205 self.ui.status(_('committing subrepository %s\n') %
1175 subrepo.subrelpath(sub))
1206 subrepo.subrelpath(sub))
1176 sr = sub.commit(cctx._text, user, date)
1207 sr = sub.commit(cctx._text, user, date)
1177 newstate[s] = (newstate[s][0], sr)
1208 newstate[s] = (newstate[s][0], sr)
1178 subrepo.writestate(self, newstate)
1209 subrepo.writestate(self, newstate)
1179
1210
1180 # Save commit message in case this transaction gets rolled back
1211 # Save commit message in case this transaction gets rolled back
1181 # (e.g. by a pretxncommit hook). Leave the content alone on
1212 # (e.g. by a pretxncommit hook). Leave the content alone on
1182 # the assumption that the user will use the same editor again.
1213 # the assumption that the user will use the same editor again.
1183 msgfn = self.savecommitmessage(cctx._text)
1214 msgfn = self.savecommitmessage(cctx._text)
1184
1215
1185 p1, p2 = self.dirstate.parents()
1216 p1, p2 = self.dirstate.parents()
1186 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1217 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1187 try:
1218 try:
1188 self.hook("precommit", throw=True, parent1=hookp1,
1219 self.hook("precommit", throw=True, parent1=hookp1,
1189 parent2=hookp2)
1220 parent2=hookp2)
1190 ret = self.commitctx(cctx, True)
1221 ret = self.commitctx(cctx, True)
1191 except: # re-raises
1222 except: # re-raises
1192 if edited:
1223 if edited:
1193 self.ui.write(
1224 self.ui.write(
1194 _('note: commit message saved in %s\n') % msgfn)
1225 _('note: commit message saved in %s\n') % msgfn)
1195 raise
1226 raise
1196
1227
1197 # update bookmarks, dirstate and mergestate
1228 # update bookmarks, dirstate and mergestate
1198 bookmarks.update(self, [p1, p2], ret)
1229 bookmarks.update(self, [p1, p2], ret)
1199 for f in changes[0] + changes[1]:
1230 for f in changes[0] + changes[1]:
1200 self.dirstate.normal(f)
1231 self.dirstate.normal(f)
1201 for f in changes[2]:
1232 for f in changes[2]:
1202 self.dirstate.drop(f)
1233 self.dirstate.drop(f)
1203 self.dirstate.setparents(ret)
1234 self.dirstate.setparents(ret)
1204 ms.reset()
1235 ms.reset()
1205 finally:
1236 finally:
1206 wlock.release()
1237 wlock.release()
1207
1238
1208 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1239 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1209 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1240 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1210 self._afterlock(commithook)
1241 self._afterlock(commithook)
1211 return ret
1242 return ret
1212
1243
1213 def commitctx(self, ctx, error=False):
1244 def commitctx(self, ctx, error=False):
1214 """Add a new revision to current repository.
1245 """Add a new revision to current repository.
1215 Revision information is passed via the context argument.
1246 Revision information is passed via the context argument.
1216 """
1247 """
1217
1248
1218 tr = lock = None
1249 tr = lock = None
1219 removed = list(ctx.removed())
1250 removed = list(ctx.removed())
1220 p1, p2 = ctx.p1(), ctx.p2()
1251 p1, p2 = ctx.p1(), ctx.p2()
1221 user = ctx.user()
1252 user = ctx.user()
1222
1253
1223 lock = self.lock()
1254 lock = self.lock()
1224 try:
1255 try:
1225 tr = self.transaction("commit")
1256 tr = self.transaction("commit")
1226 trp = weakref.proxy(tr)
1257 trp = weakref.proxy(tr)
1227
1258
1228 if ctx.files():
1259 if ctx.files():
1229 m1 = p1.manifest().copy()
1260 m1 = p1.manifest().copy()
1230 m2 = p2.manifest()
1261 m2 = p2.manifest()
1231
1262
1232 # check in files
1263 # check in files
1233 new = {}
1264 new = {}
1234 changed = []
1265 changed = []
1235 linkrev = len(self)
1266 linkrev = len(self)
1236 for f in sorted(ctx.modified() + ctx.added()):
1267 for f in sorted(ctx.modified() + ctx.added()):
1237 self.ui.note(f + "\n")
1268 self.ui.note(f + "\n")
1238 try:
1269 try:
1239 fctx = ctx[f]
1270 fctx = ctx[f]
1240 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1271 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1241 changed)
1272 changed)
1242 m1.set(f, fctx.flags())
1273 m1.set(f, fctx.flags())
1243 except OSError, inst:
1274 except OSError, inst:
1244 self.ui.warn(_("trouble committing %s!\n") % f)
1275 self.ui.warn(_("trouble committing %s!\n") % f)
1245 raise
1276 raise
1246 except IOError, inst:
1277 except IOError, inst:
1247 errcode = getattr(inst, 'errno', errno.ENOENT)
1278 errcode = getattr(inst, 'errno', errno.ENOENT)
1248 if error or errcode and errcode != errno.ENOENT:
1279 if error or errcode and errcode != errno.ENOENT:
1249 self.ui.warn(_("trouble committing %s!\n") % f)
1280 self.ui.warn(_("trouble committing %s!\n") % f)
1250 raise
1281 raise
1251 else:
1282 else:
1252 removed.append(f)
1283 removed.append(f)
1253
1284
1254 # update manifest
1285 # update manifest
1255 m1.update(new)
1286 m1.update(new)
1256 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1287 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1257 drop = [f for f in removed if f in m1]
1288 drop = [f for f in removed if f in m1]
1258 for f in drop:
1289 for f in drop:
1259 del m1[f]
1290 del m1[f]
1260 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1291 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1261 p2.manifestnode(), (new, drop))
1292 p2.manifestnode(), (new, drop))
1262 files = changed + removed
1293 files = changed + removed
1263 else:
1294 else:
1264 mn = p1.manifestnode()
1295 mn = p1.manifestnode()
1265 files = []
1296 files = []
1266
1297
1267 # update changelog
1298 # update changelog
1268 self.changelog.delayupdate()
1299 self.changelog.delayupdate()
1269 n = self.changelog.add(mn, files, ctx.description(),
1300 n = self.changelog.add(mn, files, ctx.description(),
1270 trp, p1.node(), p2.node(),
1301 trp, p1.node(), p2.node(),
1271 user, ctx.date(), ctx.extra().copy())
1302 user, ctx.date(), ctx.extra().copy())
1272 p = lambda: self.changelog.writepending() and self.root or ""
1303 p = lambda: self.changelog.writepending() and self.root or ""
1273 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1304 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1274 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1305 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1275 parent2=xp2, pending=p)
1306 parent2=xp2, pending=p)
1276 self.changelog.finalize(trp)
1307 self.changelog.finalize(trp)
1277 # set the new commit is proper phase
1308 # set the new commit is proper phase
1278 targetphase = phases.newcommitphase(self.ui)
1309 targetphase = phases.newcommitphase(self.ui)
1279 if targetphase:
1310 if targetphase:
1280 # retract boundary do not alter parent changeset.
1311 # retract boundary do not alter parent changeset.
1281 # if a parent have higher the resulting phase will
1312 # if a parent have higher the resulting phase will
1282 # be compliant anyway
1313 # be compliant anyway
1283 #
1314 #
1284 # if minimal phase was 0 we don't need to retract anything
1315 # if minimal phase was 0 we don't need to retract anything
1285 phases.retractboundary(self, targetphase, [n])
1316 phases.retractboundary(self, targetphase, [n])
1286 tr.close()
1317 tr.close()
1287 self.updatebranchcache()
1318 self.updatebranchcache()
1288 return n
1319 return n
1289 finally:
1320 finally:
1290 if tr:
1321 if tr:
1291 tr.release()
1322 tr.release()
1292 lock.release()
1323 lock.release()
1293
1324
1294 def destroyed(self):
1325 def destroyed(self, newheadrevs=None):
1295 '''Inform the repository that nodes have been destroyed.
1326 '''Inform the repository that nodes have been destroyed.
1296 Intended for use by strip and rollback, so there's a common
1327 Intended for use by strip and rollback, so there's a common
1297 place for anything that has to be done after destroying history.'''
1328 place for anything that has to be done after destroying history.
1298 # XXX it might be nice if we could take the list of destroyed
1329
1299 # nodes, but I don't see an easy way for rollback() to do that
1330 If you know the branchheadcache was uptodate before nodes were removed
1331 and you also know the set of candidate set of new heads that may have
1332 resulted from the destruction, you can set newheadrevs. This will
1333 enable the code to update the branchheads cache, rather than having
1334 future code decide it's invalid and regenrating it.
1335 '''
1336 if newheadrevs:
1337 tiprev = len(self) - 1
1338 ctxgen = (self[rev] for rev in newheadrevs)
1339 self._updatebranchcache(self._branchcache, ctxgen)
1340 self._writebranchcache(self._branchcache, self.changelog.tip(),
1341 tiprev)
1342 else:
1343 # No info to update the cache. If nodes were destroyed, the cache
1344 # is stale and this will be caught the next time it is read.
1345 pass
1300
1346
1301 # Ensure the persistent tag cache is updated. Doing it now
1347 # Ensure the persistent tag cache is updated. Doing it now
1302 # means that the tag cache only has to worry about destroyed
1348 # means that the tag cache only has to worry about destroyed
1303 # heads immediately after a strip/rollback. That in turn
1349 # heads immediately after a strip/rollback. That in turn
1304 # guarantees that "cachetip == currenttip" (comparing both rev
1350 # guarantees that "cachetip == currenttip" (comparing both rev
1305 # and node) always means no nodes have been added or destroyed.
1351 # and node) always means no nodes have been added or destroyed.
1306
1352
1307 # XXX this is suboptimal when qrefresh'ing: we strip the current
1353 # XXX this is suboptimal when qrefresh'ing: we strip the current
1308 # head, refresh the tag cache, then immediately add a new head.
1354 # head, refresh the tag cache, then immediately add a new head.
1309 # But I think doing it this way is necessary for the "instant
1355 # But I think doing it this way is necessary for the "instant
1310 # tag cache retrieval" case to work.
1356 # tag cache retrieval" case to work.
1311 self.invalidatecaches()
1357 self.invalidatecaches()
1312
1358
1313 # Discard all cache entries to force reloading everything.
1359 # Discard all cache entries to force reloading everything.
1314 self._filecache.clear()
1360 self._filecache.clear()
1315
1361
1316 def walk(self, match, node=None):
1362 def walk(self, match, node=None):
1317 '''
1363 '''
1318 walk recursively through the directory tree or a given
1364 walk recursively through the directory tree or a given
1319 changeset, finding all files matched by the match
1365 changeset, finding all files matched by the match
1320 function
1366 function
1321 '''
1367 '''
1322 return self[node].walk(match)
1368 return self[node].walk(match)
1323
1369
1324 def status(self, node1='.', node2=None, match=None,
1370 def status(self, node1='.', node2=None, match=None,
1325 ignored=False, clean=False, unknown=False,
1371 ignored=False, clean=False, unknown=False,
1326 listsubrepos=False):
1372 listsubrepos=False):
1327 """return status of files between two nodes or node and working
1373 """return status of files between two nodes or node and working
1328 directory.
1374 directory.
1329
1375
1330 If node1 is None, use the first dirstate parent instead.
1376 If node1 is None, use the first dirstate parent instead.
1331 If node2 is None, compare node1 with working directory.
1377 If node2 is None, compare node1 with working directory.
1332 """
1378 """
1333
1379
1334 def mfmatches(ctx):
1380 def mfmatches(ctx):
1335 mf = ctx.manifest().copy()
1381 mf = ctx.manifest().copy()
1336 if match.always():
1382 if match.always():
1337 return mf
1383 return mf
1338 for fn in mf.keys():
1384 for fn in mf.keys():
1339 if not match(fn):
1385 if not match(fn):
1340 del mf[fn]
1386 del mf[fn]
1341 return mf
1387 return mf
1342
1388
1343 if isinstance(node1, context.changectx):
1389 if isinstance(node1, context.changectx):
1344 ctx1 = node1
1390 ctx1 = node1
1345 else:
1391 else:
1346 ctx1 = self[node1]
1392 ctx1 = self[node1]
1347 if isinstance(node2, context.changectx):
1393 if isinstance(node2, context.changectx):
1348 ctx2 = node2
1394 ctx2 = node2
1349 else:
1395 else:
1350 ctx2 = self[node2]
1396 ctx2 = self[node2]
1351
1397
1352 working = ctx2.rev() is None
1398 working = ctx2.rev() is None
1353 parentworking = working and ctx1 == self['.']
1399 parentworking = working and ctx1 == self['.']
1354 match = match or matchmod.always(self.root, self.getcwd())
1400 match = match or matchmod.always(self.root, self.getcwd())
1355 listignored, listclean, listunknown = ignored, clean, unknown
1401 listignored, listclean, listunknown = ignored, clean, unknown
1356
1402
1357 # load earliest manifest first for caching reasons
1403 # load earliest manifest first for caching reasons
1358 if not working and ctx2.rev() < ctx1.rev():
1404 if not working and ctx2.rev() < ctx1.rev():
1359 ctx2.manifest()
1405 ctx2.manifest()
1360
1406
1361 if not parentworking:
1407 if not parentworking:
1362 def bad(f, msg):
1408 def bad(f, msg):
1363 # 'f' may be a directory pattern from 'match.files()',
1409 # 'f' may be a directory pattern from 'match.files()',
1364 # so 'f not in ctx1' is not enough
1410 # so 'f not in ctx1' is not enough
1365 if f not in ctx1 and f not in ctx1.dirs():
1411 if f not in ctx1 and f not in ctx1.dirs():
1366 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1412 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1367 match.bad = bad
1413 match.bad = bad
1368
1414
1369 if working: # we need to scan the working dir
1415 if working: # we need to scan the working dir
1370 subrepos = []
1416 subrepos = []
1371 if '.hgsub' in self.dirstate:
1417 if '.hgsub' in self.dirstate:
1372 subrepos = ctx2.substate.keys()
1418 subrepos = ctx2.substate.keys()
1373 s = self.dirstate.status(match, subrepos, listignored,
1419 s = self.dirstate.status(match, subrepos, listignored,
1374 listclean, listunknown)
1420 listclean, listunknown)
1375 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1421 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1376
1422
1377 # check for any possibly clean files
1423 # check for any possibly clean files
1378 if parentworking and cmp:
1424 if parentworking and cmp:
1379 fixup = []
1425 fixup = []
1380 # do a full compare of any files that might have changed
1426 # do a full compare of any files that might have changed
1381 for f in sorted(cmp):
1427 for f in sorted(cmp):
1382 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1428 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1383 or ctx1[f].cmp(ctx2[f])):
1429 or ctx1[f].cmp(ctx2[f])):
1384 modified.append(f)
1430 modified.append(f)
1385 else:
1431 else:
1386 fixup.append(f)
1432 fixup.append(f)
1387
1433
1388 # update dirstate for files that are actually clean
1434 # update dirstate for files that are actually clean
1389 if fixup:
1435 if fixup:
1390 if listclean:
1436 if listclean:
1391 clean += fixup
1437 clean += fixup
1392
1438
1393 try:
1439 try:
1394 # updating the dirstate is optional
1440 # updating the dirstate is optional
1395 # so we don't wait on the lock
1441 # so we don't wait on the lock
1396 wlock = self.wlock(False)
1442 wlock = self.wlock(False)
1397 try:
1443 try:
1398 for f in fixup:
1444 for f in fixup:
1399 self.dirstate.normal(f)
1445 self.dirstate.normal(f)
1400 finally:
1446 finally:
1401 wlock.release()
1447 wlock.release()
1402 except error.LockError:
1448 except error.LockError:
1403 pass
1449 pass
1404
1450
1405 if not parentworking:
1451 if not parentworking:
1406 mf1 = mfmatches(ctx1)
1452 mf1 = mfmatches(ctx1)
1407 if working:
1453 if working:
1408 # we are comparing working dir against non-parent
1454 # we are comparing working dir against non-parent
1409 # generate a pseudo-manifest for the working dir
1455 # generate a pseudo-manifest for the working dir
1410 mf2 = mfmatches(self['.'])
1456 mf2 = mfmatches(self['.'])
1411 for f in cmp + modified + added:
1457 for f in cmp + modified + added:
1412 mf2[f] = None
1458 mf2[f] = None
1413 mf2.set(f, ctx2.flags(f))
1459 mf2.set(f, ctx2.flags(f))
1414 for f in removed:
1460 for f in removed:
1415 if f in mf2:
1461 if f in mf2:
1416 del mf2[f]
1462 del mf2[f]
1417 else:
1463 else:
1418 # we are comparing two revisions
1464 # we are comparing two revisions
1419 deleted, unknown, ignored = [], [], []
1465 deleted, unknown, ignored = [], [], []
1420 mf2 = mfmatches(ctx2)
1466 mf2 = mfmatches(ctx2)
1421
1467
1422 modified, added, clean = [], [], []
1468 modified, added, clean = [], [], []
1423 withflags = mf1.withflags() | mf2.withflags()
1469 withflags = mf1.withflags() | mf2.withflags()
1424 for fn in mf2:
1470 for fn in mf2:
1425 if fn in mf1:
1471 if fn in mf1:
1426 if (fn not in deleted and
1472 if (fn not in deleted and
1427 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1473 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1428 (mf1[fn] != mf2[fn] and
1474 (mf1[fn] != mf2[fn] and
1429 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1475 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1430 modified.append(fn)
1476 modified.append(fn)
1431 elif listclean:
1477 elif listclean:
1432 clean.append(fn)
1478 clean.append(fn)
1433 del mf1[fn]
1479 del mf1[fn]
1434 elif fn not in deleted:
1480 elif fn not in deleted:
1435 added.append(fn)
1481 added.append(fn)
1436 removed = mf1.keys()
1482 removed = mf1.keys()
1437
1483
1438 if working and modified and not self.dirstate._checklink:
1484 if working and modified and not self.dirstate._checklink:
1439 # Symlink placeholders may get non-symlink-like contents
1485 # Symlink placeholders may get non-symlink-like contents
1440 # via user error or dereferencing by NFS or Samba servers,
1486 # via user error or dereferencing by NFS or Samba servers,
1441 # so we filter out any placeholders that don't look like a
1487 # so we filter out any placeholders that don't look like a
1442 # symlink
1488 # symlink
1443 sane = []
1489 sane = []
1444 for f in modified:
1490 for f in modified:
1445 if ctx2.flags(f) == 'l':
1491 if ctx2.flags(f) == 'l':
1446 d = ctx2[f].data()
1492 d = ctx2[f].data()
1447 if len(d) >= 1024 or '\n' in d or util.binary(d):
1493 if len(d) >= 1024 or '\n' in d or util.binary(d):
1448 self.ui.debug('ignoring suspect symlink placeholder'
1494 self.ui.debug('ignoring suspect symlink placeholder'
1449 ' "%s"\n' % f)
1495 ' "%s"\n' % f)
1450 continue
1496 continue
1451 sane.append(f)
1497 sane.append(f)
1452 modified = sane
1498 modified = sane
1453
1499
1454 r = modified, added, removed, deleted, unknown, ignored, clean
1500 r = modified, added, removed, deleted, unknown, ignored, clean
1455
1501
1456 if listsubrepos:
1502 if listsubrepos:
1457 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1503 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1458 if working:
1504 if working:
1459 rev2 = None
1505 rev2 = None
1460 else:
1506 else:
1461 rev2 = ctx2.substate[subpath][1]
1507 rev2 = ctx2.substate[subpath][1]
1462 try:
1508 try:
1463 submatch = matchmod.narrowmatcher(subpath, match)
1509 submatch = matchmod.narrowmatcher(subpath, match)
1464 s = sub.status(rev2, match=submatch, ignored=listignored,
1510 s = sub.status(rev2, match=submatch, ignored=listignored,
1465 clean=listclean, unknown=listunknown,
1511 clean=listclean, unknown=listunknown,
1466 listsubrepos=True)
1512 listsubrepos=True)
1467 for rfiles, sfiles in zip(r, s):
1513 for rfiles, sfiles in zip(r, s):
1468 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1514 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1469 except error.LookupError:
1515 except error.LookupError:
1470 self.ui.status(_("skipping missing subrepository: %s\n")
1516 self.ui.status(_("skipping missing subrepository: %s\n")
1471 % subpath)
1517 % subpath)
1472
1518
1473 for l in r:
1519 for l in r:
1474 l.sort()
1520 l.sort()
1475 return r
1521 return r
1476
1522
1477 def heads(self, start=None):
1523 def heads(self, start=None):
1478 heads = self.changelog.heads(start)
1524 heads = self.changelog.heads(start)
1479 # sort the output in rev descending order
1525 # sort the output in rev descending order
1480 return sorted(heads, key=self.changelog.rev, reverse=True)
1526 return sorted(heads, key=self.changelog.rev, reverse=True)
1481
1527
def branchheads(self, branch=None, start=None, closed=False):
    '''return a (possibly filtered) list of heads for the given branch

    Heads are returned in topological order, from newest to oldest.
    If branch is None, use the dirstate branch.
    If start is not None, return only heads reachable from start.
    If closed is True, return heads that are marked as closed as well.
    '''
    if branch is None:
        branch = self[None].branch()
    try:
        # the branch cache stores heads lowest-rev first; flip it
        bheads = list(reversed(self.branchmap()[branch]))
    except KeyError:
        # unknown branch name
        return []
    if start is not None:
        # keep only the heads that descend from start
        reachable = set(self.changelog.nodesbetween([start], bheads)[2])
        bheads = [h for h in bheads if h in reachable]
    if not closed:
        # drop heads whose changeset carries the 'close' extra flag
        bheads = [h for h in bheads
                  if 'close' not in self.changelog.read(h)[5]]
    return bheads
1505
1551
def branches(self, nodes):
    """For each node, describe the linear branch segment it sits on.

    Returns a list of (tip, base, p1, p2) tuples where base is the
    first ancestor of tip that is either a merge or a root changeset.
    With no nodes given, the repository tip is used.
    """
    cl = self.changelog
    if not nodes:
        nodes = [cl.tip()]
    result = []
    for tip in nodes:
        node = tip
        while True:
            p1, p2 = cl.parents(node)
            # stop when we hit a merge (two real parents) or a root
            if p2 != nullid or p1 == nullid:
                result.append((tip, node, p1, p2))
                break
            node = p1
    return result
1519
1565
def between(self, pairs):
    """For each (top, bottom) pair, sample nodes along the path between
    them at exponentially growing distances (1, 2, 4, ...).

    Returns one list of sampled nodes per input pair.  This is the
    server side of the old discovery protocol's "between" query.
    """
    result = []
    for top, bottom in pairs:
        node, sample = top, []
        step, dist = 1, 0
        # walk first parents from top towards bottom
        while node != bottom and node != nullid:
            if dist == step:
                sample.append(node)
                step *= 2
            node = self.changelog.parents(node)[0]
            dist += 1
        result.append(sample)
    return result
1538
1584
def pull(self, remote, heads=None, force=False):
    """Pull changes from remote into the local repository.

    remote: peer repository to pull from.
    heads: optional list of remote heads to limit the pull to;
        None pulls everything available.
    force: allow pulling from an unrelated repository.

    Returns the integer result of addchangegroup(), or 0 when no
    changes were found.
    """
    lock = self.lock()
    try:
        # ask the discovery protocol what the remote has that we lack
        tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                           force=force)
        common, fetch, rheads = tmp
        if not fetch:
            self.ui.status(_("no changes found\n"))
            added = []
            result = 0
        else:
            if heads is None and list(common) == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            # pick the richest changegroup protocol the remote supports
            if remote.capable('getbundle'):
                cg = remote.getbundle('pull', common=common,
                                      heads=heads or rheads)
            elif heads is None:
                cg = remote.changegroup(fetch, 'pull')
            elif not remote.capable('changegroupsubset'):
                raise util.Abort(_("partial pull cannot be done because "
                                   "other repository doesn't support "
                                   "changegroupsubset."))
            else:
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            # record which changelog revisions this pull adds locally
            clstart = len(self.changelog)
            result = self.addchangegroup(cg, 'pull', remote.url())
            clend = len(self.changelog)
            added = [self.changelog.node(r) for r in xrange(clstart, clend)]

        # compute target subset
        if heads is None:
            # We pulled every thing possible
            # sync on everything common
            subset = common + added
        else:
            # We pulled a specific subset
            # sync on this subset
            subset = heads

        # Get remote phases data from remote
        remotephases = remote.listkeys('phases')
        publishing = bool(remotephases.get('publishing', False))
        if remotephases and not publishing:
            # remote is new and unpublishing
            pheads, _dr = phases.analyzeremotephases(self, subset,
                                                     remotephases)
            phases.advanceboundary(self, phases.public, pheads)
            phases.advanceboundary(self, phases.draft, subset)
        else:
            # Remote is old or publishing all common changesets
            # should be seen as public
            phases.advanceboundary(self, phases.public, subset)
    finally:
        lock.release()

    return result
1599
1645
def checkpush(self, force, revs):
    """Extensions can override this function if additional checks have
    to be performed before pushing, or call it if they override push
    command.

    force: boolean mirroring the --force push option.
    revs: the revisions selected for pushing, or None for all.

    The default implementation performs no checks.
    """
    pass
1606
1652
def push(self, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from the current
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    # get local lock as we might write phase data
    locallock = self.lock()
    try:
        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            # old-style remote: we must hold its lock while pushing
            lock = remote.lock()
        try:
            # discovery
            fci = discovery.findcommonincoming
            commoninc = fci(self, remote, force=force)
            common, inc, remoteheads = commoninc
            fco = discovery.findcommonoutgoing
            outgoing = fco(self, remote, onlyheads=revs,
                           commoninc=commoninc, force=force)


            if not outgoing.missing:
                # nothing to push
                scmutil.nochangesfound(self.ui, outgoing.excluded)
                ret = None
            else:
                # something to push
                if not force:
                    # may abort on new remote heads unless --force/newbranch
                    discovery.checkheads(self, remote, outgoing,
                                         remoteheads, newbranch,
                                         bool(inc))

                # create a changegroup from local
                if revs is None and not outgoing.excluded:
                    # push everything,
                    # use the fast path, no race possible on push
                    cg = self._changegroup(outgoing.missing, 'push')
                else:
                    cg = self.getlocalbundle('push', outgoing)

                # apply changegroup to remote
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remoteheads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remoteheads, 'push')
                else:
                    # we return an integer indicating remote head count
                    # change
                    ret = remote.addchangegroup(cg, 'push', self.url())

            # decide which heads the two repositories now provably share
            if ret:
                # push succeed, synchonize target of the push
                cheads = outgoing.missingheads
            elif revs is None:
                # All out push fails. synchronize all common
                cheads = outgoing.commonheads
            else:
                # I want cheads = heads(::missingheads and ::commonheads)
                # (missingheads is revs with secret changeset filtered out)
                #
                # This can be expressed as:
                #     cheads = ( (missingheads and ::commonheads)
                #              + (commonheads and ::missingheads))"
                #              )
                #
                # while trying to push we already computed the following:
                #     common = (::commonheads)
                #     missing = ((commonheads::missingheads) - commonheads)
                #
                # We can pick:
                # * missingheads part of comon (::commonheads)
                common = set(outgoing.common)
                cheads = [node for node in revs if node in common]
                # and
                # * commonheads parents on missing
                revset = self.set('%ln and parents(roots(%ln))',
                                  outgoing.commonheads,
                                  outgoing.missing)
                cheads.extend(c.node() for c in revset)
            # even when we don't push, exchanging phase data is useful
            remotephases = remote.listkeys('phases')
            if not remotephases: # old server or public only repo
                phases.advanceboundary(self, phases.public, cheads)
                # don't push any phase data as there is nothing to push
            else:
                ana = phases.analyzeremotephases(self, cheads, remotephases)
                pheads, droots = ana
                ### Apply remote phase on local
                if remotephases.get('publishing', False):
                    phases.advanceboundary(self, phases.public, cheads)
                else: # publish = False
                    phases.advanceboundary(self, phases.public, pheads)
                    phases.advanceboundary(self, phases.draft, cheads)
                ### Apply local phase on remote

                # Get the list of all revs draft on remote by public here.
                # XXX Beware that revset break if droots is not strictly
                # XXX root we may want to ensure it is but it is costly
                outdated = self.set('heads((%ln::%ln) and public())',
                                    droots, cheads)
                for newremotehead in outdated:
                    r = remote.pushkey('phases',
                                       newremotehead.hex(),
                                       str(phases.draft),
                                       str(phases.public))
                    if not r:
                        self.ui.warn(_('updating %s to public failed!\n')
                                     % newremotehead)
        finally:
            if lock is not None:
                lock.release()
    finally:
        locallock.release()

    # bookmark synchronization runs after both locks are released
    self.ui.debug("checking for updated bookmarks\n")
    rb = remote.listkeys('bookmarks')
    for k in rb.keys():
        if k in self._bookmarks:
            nr, nl = rb[k], hex(self._bookmarks[k])
            if nr in self:
                cr = self[nr]
                cl = self[nl]
                # only fast-forward the remote bookmark
                if cl in cr.descendants():
                    r = remote.pushkey('bookmarks', k, nr, nl)
                    if r:
                        self.ui.status(_("updating bookmark %s\n") % k)
                    else:
                        self.ui.warn(_('updating bookmark %s'
                                       ' failed!\n') % k)

    return ret
1758
1804
def changegroupinfo(self, nodes, source):
    """Report how many changesets a changegroup operation involves.

    Prints the count when verbose or when bundling, and each node's
    hex id under --debug.  Purely informational output.
    """
    ui = self.ui
    if source == 'bundle' or ui.verbose:
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for n in nodes:
            ui.debug("%s\n" % hex(n))
1766
1812
def changegroupsubset(self, bases, heads, source):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the bases and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    if not bases:
        bases = [nullid]
    cl = self.changelog
    csets, bases, heads = cl.nodesbetween(bases, heads)
    # every ancestor of a base is assumed to be known on the other side
    baserevs = [cl.rev(n) for n in bases]
    common = set(cl.ancestors(*baserevs))
    return self._changegroupsubset(common, csets, heads, source)
1787
1833
def getlocalbundle(self, source, outgoing):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if outgoing.missing:
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)
    # nothing to bundle
    return None
1799
1845
def getbundle(self, source, heads=None, common=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    cl = self.changelog
    if not common:
        common = [nullid]
    else:
        # drop advertised common nodes that we do not actually have
        known = cl.nodemap
        common = [n for n in common if n in known]
    if not heads:
        heads = cl.heads()
    outgoing = discovery.outgoing(cl, common, heads)
    return self.getlocalbundle(source, outgoing)
1819
1865
def _changegroupsubset(self, commonrevs, csets, heads, source):
    """Build a changegroup for csets, pruning anything whose linkrev is
    in commonrevs (i.e. presumably already known to the recipient).

    commonrevs: set of changelog revision numbers used by prune().
    csets: changeset nodes to include.
    heads: heads of the requested subset (note: sorted in place here).
    source: tag passed to the preoutgoing/outgoing hooks.

    Returns an unbundle10 object wrapping a lazily generated stream.
    """

    cl = self.changelog
    mf = self.manifest
    mfs = {} # needed manifests
    fnodes = {} # needed file nodes
    changedfiles = set()
    # mutable state shared with lookup(): [current filename,
    # {filenode -> owning changelog node}]
    fstate = ['', {}]
    # progress state shared with lookup(): [done, total]
    count = [0, 0]

    # can we go through the fast path ?
    heads.sort()
    if heads == sorted(self.heads()):
        return self._changegroup(csets, source)

    # slow path
    self.hook('preoutgoing', throw=True, source=source)
    self.changegroupinfo(csets, source)

    # filter any nodes that claim to be part of the known set
    def prune(revlog, missing):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing
                if rl(rr(n)) not in commonrevs]

    progress = self.ui.progress
    _bundling = _('bundling')
    _changesets = _('changesets')
    _manifests = _('manifests')
    _files = _('files')

    # callback invoked by the bundler for every node; besides mapping a
    # node to its owning changelog node it records which manifests and
    # filenodes will be needed and drives the progress display
    def lookup(revlog, x):
        if revlog == cl:
            c = cl.read(x)
            changedfiles.update(c[3])
            mfs.setdefault(c[0], x)
            count[0] += 1
            progress(_bundling, count[0],
                     unit=_changesets, total=count[1])
            return x
        elif revlog == mf:
            clnode = mfs[x]
            mdata = mf.readfast(x)
            for f, n in mdata.iteritems():
                if f in changedfiles:
                    fnodes[f].setdefault(n, clnode)
            count[0] += 1
            progress(_bundling, count[0],
                     unit=_manifests, total=count[1])
            return clnode
        else:
            progress(_bundling, count[0], item=fstate[0],
                     unit=_files, total=count[1])
            return fstate[1][x]

    bundler = changegroup.bundle10(lookup)
    reorder = self.ui.config('bundle', 'reorder', 'auto')
    if reorder == 'auto':
        reorder = None
    else:
        reorder = util.parsebool(reorder)

    def gengroup():
        # Create a changenode group generator that will call our functions
        # back to lookup the owning changenode and collect information.
        count[:] = [0, len(csets)]
        for chunk in cl.group(csets, bundler, reorder=reorder):
            yield chunk
        progress(_bundling, None)

        # Create a generator for the manifestnodes that calls our lookup
        # and data collection functions back.
        for f in changedfiles:
            fnodes[f] = {}
        count[:] = [0, len(mfs)]
        for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
            yield chunk
        progress(_bundling, None)

        mfs.clear()

        # Go through all our files in order sorted by name.
        count[:] = [0, len(changedfiles)]
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s")
                                 % fname)
            fstate[0] = fname
            fstate[1] = fnodes.pop(fname, {})

            nodelist = prune(filerevlog, fstate[1])
            if nodelist:
                count[0] += 1
                yield bundler.fileheader(fname)
                for chunk in filerevlog.group(nodelist, bundler, reorder):
                    yield chunk

        # Signal that no more groups are left.
        yield bundler.close()
        progress(_bundling, None)

    if csets:
        self.hook('outgoing', node=hex(csets[0]), source=source)

    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1926
1972
def changegroup(self, basenodes, source):
    """Return a changegroup of all nodes descending from basenodes.

    Delegates to changegroupsubset() with the current heads instead of
    bundling directly, to avoid a race with concurrent commits
    (issue1320).
    """
    currentheads = self.heads()
    return self.changegroupsubset(basenodes, currentheads, source)
1930
1976
def _changegroup(self, nodes, source):
    """Compute the changegroup of all nodes that we have that a recipient
    doesn't. Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them.

    nodes is the set of nodes to send"""

    cl = self.changelog
    mf = self.manifest
    mfs = {}
    changedfiles = set()
    # current filename, shared with the lookup() callback below
    fstate = ['']
    # progress state shared with lookup(): [done, total]
    count = [0, 0]

    self.hook('preoutgoing', throw=True, source=source)
    self.changegroupinfo(nodes, source)

    revset = set([cl.rev(n) for n in nodes])

    # all nodes of a revlog whose linkrev points into the sent set
    def gennodelst(log):
        ln, llr = log.node, log.linkrev
        return [ln(r) for r in log if llr(r) in revset]

    progress = self.ui.progress
    _bundling = _('bundling')
    _changesets = _('changesets')
    _manifests = _('manifests')
    _files = _('files')

    # callback invoked by the bundler for every node: returns the
    # changelog node owning x and drives the progress display
    def lookup(revlog, x):
        if revlog == cl:
            c = cl.read(x)
            changedfiles.update(c[3])
            mfs.setdefault(c[0], x)
            count[0] += 1
            progress(_bundling, count[0],
                     unit=_changesets, total=count[1])
            return x
        elif revlog == mf:
            count[0] += 1
            progress(_bundling, count[0],
                     unit=_manifests, total=count[1])
            return cl.node(revlog.linkrev(revlog.rev(x)))
        else:
            progress(_bundling, count[0], item=fstate[0],
                     total=count[1], unit=_files)
            return cl.node(revlog.linkrev(revlog.rev(x)))

    bundler = changegroup.bundle10(lookup)
    reorder = self.ui.config('bundle', 'reorder', 'auto')
    if reorder == 'auto':
        reorder = None
    else:
        reorder = util.parsebool(reorder)

    def gengroup():
        '''yield a sequence of changegroup chunks (strings)'''
        # construct a list of all changed files

        # changesets first, then manifests, then one group per file
        count[:] = [0, len(nodes)]
        for chunk in cl.group(nodes, bundler, reorder=reorder):
            yield chunk
        progress(_bundling, None)

        count[:] = [0, len(mfs)]
        for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
            yield chunk
        progress(_bundling, None)

        count[:] = [0, len(changedfiles)]
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s")
                                 % fname)
            fstate[0] = fname
            nodelist = gennodelst(filerevlog)
            if nodelist:
                count[0] += 1
                yield bundler.fileheader(fname)
                for chunk in filerevlog.group(nodelist, bundler, reorder):
                    yield chunk
        # empty header terminates the stream
        yield bundler.close()
        progress(_bundling, None)

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2068 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2023
2069
def addchangegroup(self, source, srctype, url, emptyok=False):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    def csmap(x):
        # linkrev callback for the changelog group: each new changeset
        # links to its own (next) revision number
        self.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        # linkrev callback for manifest/filelog groups: map a changelog
        # node back to its revision number
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0
    efiles = set()

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = cl.heads()

    tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
    try:
        trp = weakref.proxy(tr)
        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        class prog(object):
            # per-chunk progress callback handed to the bundle source
            step = _('changesets')
            count = 1
            ui = self.ui
            total = None
            def __call__(self):
                self.ui.progress(self.step, self.count, unit=_('chunks'),
                                 total=self.total)
                self.count += 1
        pr = prog()
        source.callback = pr

        source.changelogheader()
        srccontent = cl.addgroup(source, csmap, trp)
        if not (srccontent or emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        # collect the set of files touched, to size the file progress bar
        for c in xrange(clstart, clend):
            efiles.update(self[c].files())
        efiles = len(efiles)
        self.ui.progress(_('changesets'), None)

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        source.manifestheader()
        self.manifest.addgroup(source, revmap, trp)
        self.ui.progress(_('manifests'), None)

        needfiles = {}
        if self.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = self.changelog.read(self.changelog.node(cset))[0]
                mfest = self.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        self.ui.status(_("adding file changes\n"))
        pr.step = _('files')
        pr.count = 1
        pr.total = efiles
        source.callback = None

        while True:
            chunkdata = source.filelogheader()
            if not chunkdata:
                # an empty header terminates the stream of filelog groups
                break
            f = chunkdata["filename"]
            self.ui.debug("adding %s revisions\n" % f)
            pr()
            fl = self.file(f)
            o = len(fl)
            if not fl.addgroup(source, revmap, trp):
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(fl) - o
            files += 1
            if f in needfiles:
                # tick off the file nodes server-side validation demanded
                needs = needfiles[f]
                for new in xrange(o, len(fl)):
                    n = fl.node(new)
                    if n in needs:
                        needs.remove(n)
                if not needs:
                    del needfiles[f]
        self.ui.progress(_('files'), None)

        # anything left in needfiles was promised by a manifest but never
        # delivered -> the incoming bundle is incomplete
        for f, needs in needfiles.iteritems():
            fl = self.file(f)
            for n in needs:
                try:
                    fl.rev(n)
                except error.LookupError:
                    raise util.Abort(
                        _('missing file data for %s:%s - run hg verify') %
                        (f, hex(n)))

        dh = 0
        if oldheads:
            heads = cl.heads()
            dh = len(heads) - len(oldheads)
            for h in heads:
                # heads closed by an incoming changeset do not count as
                # "new heads" in the user-visible summary
                if h not in oldheads and 'close' in self[h].extra():
                    dh -= 1
        htext = ""
        if dh:
            htext = _(" (%+d heads)") % dh

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, htext))

        if changesets > 0:
            # expose the pending (not yet finalized) changelog to hooks
            p = lambda: cl.writepending() and self.root or ""
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(cl.node(clstart)), source=srctype,
                      url=url, pending=p)

        added = [cl.node(r) for r in xrange(clstart, clend)]
        publishing = self.ui.configbool('phases', 'publish', True)
        if srctype == 'push':
            # Old server can not push the boundary themself.
            # New server won't push the boundary if changeset already
            # existed locally as secrete
            #
            # We should not use added here but the list of all change in
            # the bundle
            if publishing:
                phases.advanceboundary(self, phases.public, srccontent)
            else:
                phases.advanceboundary(self, phases.draft, srccontent)
                phases.retractboundary(self, phases.draft, added)
        elif srctype != 'strip':
            # publishing only alter behavior during push
            #
            # strip should not touch boundary at all
            phases.retractboundary(self, phases.draft, added)

        # make changelog see real files again
        cl.finalize(trp)

        tr.close()

        if changesets > 0:
            def runhooks():
                # forcefully update the on-disk branch cache
                self.ui.debug("updating the branch cache\n")
                self.updatebranchcache()
                self.hook("changegroup", node=hex(cl.node(clstart)),
                          source=srctype, url=url)

                for n in added:
                    self.hook("incoming", node=hex(n), source=srctype,
                              url=url)
            # deferred until the repo lock is released so hook authors see
            # a fully committed state
            self._afterlock(runhooks)

    finally:
        tr.release()
    # never return 0 here:
    if dh < 0:
        return dh - 1
    else:
        return dh + 1
2214
2260
def stream_in(self, remote, requirements):
    """Clone by copying raw store files streamed from *remote*.

    Consumes the 'stream_out' wire protocol: one status-code line, then a
    "<file count> <byte count>" line, then for each file a "<name>\\0<size>"
    header followed by <size> bytes of raw store data. On success the local
    requirements are rewritten to the union of the streamed-in format
    requirements and our existing non-format requirements.

    Returns len(self.heads()) + 1 (always non-zero, like addchangegroup).
    """
    lock = self.lock()
    try:
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if self.ui.debugflag:
                self.ui.debug('adding %s (%s)\n' %
                              (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            # guard against a zero/negative clock delta before dividing
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements +
        #                    new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        # drop all cached state; the store was replaced underneath us
        self.invalidate()
        return len(self.heads()) + 1
    finally:
        lock.release()
2276
2322
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''
    # The previous signature used the mutable default heads=[]; a single
    # list object would be shared across every call. Normalize None to a
    # fresh empty list instead (behavior for all existing callers is
    # unchanged).
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if not stream:
        # if the server explicitely prefer to stream (for fast LANs)
        stream = remote.capable('stream-preferred')

    if stream and not heads:
        # 'stream' means remote revlog format is revlogv1 only
        if remote.capable('stream'):
            return self.stream_in(remote, set(('revlogv1',)))
        # otherwise, 'streamreqs' contains the remote revlog format
        streamreqs = remote.capable('streamreqs')
        if streamreqs:
            streamreqs = set(streamreqs.split(','))
            # if we support it, stream in and adjust our requirements
            if not streamreqs - self.supportedformats:
                return self.stream_in(remote, streamreqs)
    return self.pull(remote, heads)
2308
2354
def pushkey(self, namespace, key, old, new):
    """Update *key* in *namespace* through the pushkey protocol.

    Fires 'prepushkey' first (with throw=True, so a hook may veto the
    operation), performs the push, then notifies 'pushkey' with the
    result, which is also returned to the caller.
    """
    self.hook('prepushkey', throw=True, namespace=namespace, key=key,
              old=old, new=new)
    result = pushkey.push(self, namespace, key, old, new)
    self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
              ret=result)
    return result
2316
2362
def listkeys(self, namespace):
    """Return the pushkey listing for *namespace*.

    Fires 'prelistkeys' first (with throw=True, so a hook may veto the
    listing), then 'listkeys' with the values that will be returned.
    """
    self.hook('prelistkeys', throw=True, namespace=namespace)
    result = pushkey.list(self, namespace)
    self.hook('listkeys', namespace=namespace, values=result)
    return result
2322
2368
def debugwireargs(self, one, two, three=None, four=None, five=None):
    '''used to test argument passing over the wire'''
    # Equivalent to "%s %s %s %s %s" % (...): %s-format each argument
    # individually and join with single spaces.
    return ' '.join(['%s' % arg for arg in (one, two, three, four, five)])
2326
2372
def savecommitmessage(self, text):
    """Save *text* to .hg/last-message.txt so an aborted commit's message
    can be recovered; return the repo-relative path of the saved file."""
    msgfile = self.opener('last-message.txt', 'wb')
    try:
        msgfile.write(text)
    finally:
        msgfile.close()
    # strip "<root>/" to turn the absolute file name into a relative one
    relpath = msgfile.name[len(self.root) + 1:]
    return self.pathto(relpath)
2334
2380
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in *files*.

    The pairs are copied into fresh tuples up front, so the returned
    closure holds no reference back to the caller's structures.
    """
    pairs = [tuple(entry) for entry in files]
    def renameall():
        for src, dest in pairs:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renameall
2345
2391
def undoname(fn):
    """Map a journal file path to the corresponding undo file path."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    # only the leading 'journal' is rewritten; any suffix is preserved
    return os.path.join(directory, name.replace('journal', 'undo', 1))
2350
2396
def instance(ui, path, create):
    """Repo-type entry point: open (or create) a local repository at *path*."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
2353
2399
def islocal(path):
    # local repositories are, by definition, always local; *path* is unused
    return True
@@ -1,172 +1,185 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from mercurial import changegroup, bookmarks
9 from mercurial import changegroup, bookmarks
10 from mercurial.node import short
10 from mercurial.node import short
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12 import os
12 import os
13 import errno
13 import errno
14
14
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup"""
    # bases/heads delimit the changegroup; node names the backup file and
    # suffix distinguishes the kind of backup (e.g. 'backup' vs 'temp').
    cg = repo.changegroupsubset(bases, heads, 'strip')
    backupdir = repo.join("strip-backup")
    if not os.path.isdir(backupdir):
        os.mkdir(backupdir)
    name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix))
    if compress:
        bundletype = "HG10BZ"
    else:
        # uncompressed: used for temporary bundles that are re-read and
        # deleted immediately after the strip
        bundletype = "HG10UN"
    return changegroup.writebundle(cg, name, bundletype)
27
27
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    touched = set()

    for rev in xrange(striprev, len(repo)):
        touched |= set(repo[rev].files())

    return sorted(touched)
36
36
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    s = set()
    def collectone(revlog):
        # NOTE: both loops below consume this one shared generator; the
        # second loop resumes exactly where the first one stopped.
        linkgen = (revlog.linkrev(i) for i in revlog)
        # find the truncation point of the revlog
        for lrev in linkgen:
            if lrev >= striprev:
                break
        # see if any revision after this point has a linkrev
        # less than striprev (those will be broken by strip)
        for lrev in linkgen:
            if lrev < striprev:
                s.add(lrev)

    collectone(repo.manifest)
    for fname in files:
        collectone(repo.file(fname))

    return s
57
57
def strip(ui, repo, nodelist, backup="all", topic='backup'):
    """Remove the revisions in *nodelist* and all their descendants.

    backup: 'all' writes a full backup bundle; 'strip' keeps the partial
    (temporary) bundle on disk; any other value keeps no backup.
    Revisions above the strip point that are NOT descendants of it are
    bundled and re-applied after truncation so they survive the strip.
    """
    # It simplifies the logic around updating the branchheads cache if we only
    # have to consider the effect of the stripped revisions and not revisions
    # missing because the cache is out-of-date.
    repo.updatebranchcache()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)
    # NOTE(review): later calls to _bundle() reference 'node', which is only
    # defined here via Python 2's list-comprehension variable leak (it is the
    # last element of nodelist) — confirm before porting to Python 3.

    # Set of potential new heads resulting from the strip. The parents of any
    # node removed could be a new head because the node to be removed could have
    # been the only child of the parent.
    # Do a list->set->list conversion to remove duplicates.
    stringstriplist = [str(rev) for rev in striplist]
    newheadrevs = set(repo.revs("parents(%lr::) - %lr::", stringstriplist,
                                stringstriplist))

    keeppartialbundle = backup == 'strip'

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants(rev):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(*saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # remember bookmarks pointing into the stripped region so they can be
    # moved to the new working-directory parent afterwards
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    if backup == "all":
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=keeppartialbundle)

    mfst = repo.manifest

    tr = repo.transaction("strip")
    offset = len(tr.entries)

    try:
        tr.startgroup()
        cl.strip(striprev, tr)
        mfst.strip(striprev, tr)
        for fn in files:
            repo.file(fn).strip(striprev, tr)
        tr.endgroup()

        try:
            # truncate the store files recorded by the transaction back to
            # their pre-strip offsets
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.sopener(file, 'a').truncate(troffset)
            tr.close()
        except: # re-raises
            tr.abort()
            raise

        if saveheads or savebases:
            # re-apply the revisions that were above the strip point but
            # not descendants of it
            ui.note(_("adding branch\n"))
            f = open(chgrpfile, "rb")
            gen = changegroup.readbundle(f, chgrpfile)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
            if not keeppartialbundle:
                os.unlink(chgrpfile)

        # remove undo files
        for undofile in repo.undofiles():
            try:
                os.unlink(undofile)
            except OSError, e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') % (undofile, str(e)))

        for m in updatebm:
            bm[m] = repo['.'].node()
        bookmarks.write(repo)
    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, full bundle stored in '%s'\n")
                    % backupfile)
        elif saveheads:
            ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                    % chgrpfile)
        raise

    # pass the potential new heads so the branchheads cache can be updated
    # incrementally instead of being rebuilt from scratch
    repo.destroyed(newheadrevs)
@@ -1,352 +1,353 b''
1 $ "$TESTDIR/hghave" execbit || exit 80
1 $ "$TESTDIR/hghave" execbit || exit 80
2
2
3 $ hg init
3 $ hg init
4
4
5 Setup:
5 Setup:
6
6
7 $ echo a >> a
7 $ echo a >> a
8 $ hg ci -Am 'base'
8 $ hg ci -Am 'base'
9 adding a
9 adding a
10
10
11 Refuse to amend public csets:
11 Refuse to amend public csets:
12
12
13 $ hg phase -r . -p
13 $ hg phase -r . -p
14 $ hg ci --amend
14 $ hg ci --amend
15 abort: cannot amend public changesets
15 abort: cannot amend public changesets
16 [255]
16 [255]
17 $ hg phase -r . -f -d
17 $ hg phase -r . -f -d
18
18
19 $ echo a >> a
19 $ echo a >> a
20 $ hg ci -Am 'base1'
20 $ hg ci -Am 'base1'
21
21
22 Nothing to amend:
22 Nothing to amend:
23
23
24 $ hg ci --amend
24 $ hg ci --amend
25 nothing changed
25 nothing changed
26 [1]
26 [1]
27
27
28 Amending changeset with changes in working dir:
28 Amending changeset with changes in working dir:
29
29
30 $ echo a >> a
30 $ echo a >> a
31 $ hg ci --amend -m 'amend base1'
31 $ hg ci --amend -m 'amend base1'
32 saved backup bundle to $TESTTMP/.hg/strip-backup/489edb5b847d-amend-backup.hg
32 saved backup bundle to $TESTTMP/.hg/strip-backup/489edb5b847d-amend-backup.hg
33 $ hg diff -c .
33 $ hg diff -c .
34 diff -r ad120869acf0 -r 9cd25b479c51 a
34 diff -r ad120869acf0 -r 9cd25b479c51 a
35 --- a/a Thu Jan 01 00:00:00 1970 +0000
35 --- a/a Thu Jan 01 00:00:00 1970 +0000
36 +++ b/a Thu Jan 01 00:00:00 1970 +0000
36 +++ b/a Thu Jan 01 00:00:00 1970 +0000
37 @@ -1,1 +1,3 @@
37 @@ -1,1 +1,3 @@
38 a
38 a
39 +a
39 +a
40 +a
40 +a
41 $ hg log
41 $ hg log
42 changeset: 1:9cd25b479c51
42 changeset: 1:9cd25b479c51
43 tag: tip
43 tag: tip
44 user: test
44 user: test
45 date: Thu Jan 01 00:00:00 1970 +0000
45 date: Thu Jan 01 00:00:00 1970 +0000
46 summary: amend base1
46 summary: amend base1
47
47
48 changeset: 0:ad120869acf0
48 changeset: 0:ad120869acf0
49 user: test
49 user: test
50 date: Thu Jan 01 00:00:00 1970 +0000
50 date: Thu Jan 01 00:00:00 1970 +0000
51 summary: base
51 summary: base
52
52
53
53
54 Add new file:
54 Add new file:
55
55
56 $ echo b > b
56 $ echo b > b
57 $ hg ci --amend -Am 'amend base1 new file'
57 $ hg ci --amend -Am 'amend base1 new file'
58 adding b
58 adding b
59 saved backup bundle to $TESTTMP/.hg/strip-backup/9cd25b479c51-amend-backup.hg
59 saved backup bundle to $TESTTMP/.hg/strip-backup/9cd25b479c51-amend-backup.hg
60
60
61 Remove file that was added in amended commit:
61 Remove file that was added in amended commit:
62
62
63 $ hg rm b
63 $ hg rm b
64 $ hg ci --amend -m 'amend base1 remove new file'
64 $ hg ci --amend -m 'amend base1 remove new file'
65 saved backup bundle to $TESTTMP/.hg/strip-backup/e2bb3ecffd2f-amend-backup.hg
65 saved backup bundle to $TESTTMP/.hg/strip-backup/e2bb3ecffd2f-amend-backup.hg
66
66
67 $ hg cat b
67 $ hg cat b
68 b: no such file in rev 664a9b2d60cd
68 b: no such file in rev 664a9b2d60cd
69 [1]
69 [1]
70
70
71 No changes, just a different message:
71 No changes, just a different message:
72
72
73 $ hg ci -v --amend -m 'no changes, new message'
73 $ hg ci -v --amend -m 'no changes, new message'
74 amending changeset 664a9b2d60cd
74 amending changeset 664a9b2d60cd
75 copying changeset 664a9b2d60cd to ad120869acf0
75 copying changeset 664a9b2d60cd to ad120869acf0
76 a
76 a
77 stripping amended changeset 664a9b2d60cd
77 stripping amended changeset 664a9b2d60cd
78 1 changesets found
78 1 changesets found
79 saved backup bundle to $TESTTMP/.hg/strip-backup/664a9b2d60cd-amend-backup.hg
79 saved backup bundle to $TESTTMP/.hg/strip-backup/664a9b2d60cd-amend-backup.hg
80 1 changesets found
80 1 changesets found
81 adding branch
81 adding branch
82 adding changesets
82 adding changesets
83 adding manifests
83 adding manifests
84 adding file changes
84 adding file changes
85 added 1 changesets with 1 changes to 1 files
85 added 1 changesets with 1 changes to 1 files
86 committed changeset 1:ea6e356ff2ad
86 committed changeset 1:ea6e356ff2ad
87 $ hg diff -c .
87 $ hg diff -c .
88 diff -r ad120869acf0 -r ea6e356ff2ad a
88 diff -r ad120869acf0 -r ea6e356ff2ad a
89 --- a/a Thu Jan 01 00:00:00 1970 +0000
89 --- a/a Thu Jan 01 00:00:00 1970 +0000
90 +++ b/a Thu Jan 01 00:00:00 1970 +0000
90 +++ b/a Thu Jan 01 00:00:00 1970 +0000
91 @@ -1,1 +1,3 @@
91 @@ -1,1 +1,3 @@
92 a
92 a
93 +a
93 +a
94 +a
94 +a
95 $ hg log
95 $ hg log
96 changeset: 1:ea6e356ff2ad
96 changeset: 1:ea6e356ff2ad
97 tag: tip
97 tag: tip
98 user: test
98 user: test
99 date: Thu Jan 01 00:00:00 1970 +0000
99 date: Thu Jan 01 00:00:00 1970 +0000
100 summary: no changes, new message
100 summary: no changes, new message
101
101
102 changeset: 0:ad120869acf0
102 changeset: 0:ad120869acf0
103 user: test
103 user: test
104 date: Thu Jan 01 00:00:00 1970 +0000
104 date: Thu Jan 01 00:00:00 1970 +0000
105 summary: base
105 summary: base
106
106
107
107
108 Disable default date on commit so when -d isn't given, the old date is preserved:
108 Disable default date on commit so when -d isn't given, the old date is preserved:
109
109
110 $ echo '[defaults]' >> $HGRCPATH
110 $ echo '[defaults]' >> $HGRCPATH
111 $ echo 'commit=' >> $HGRCPATH
111 $ echo 'commit=' >> $HGRCPATH
112
112
113 Test -u/-d:
113 Test -u/-d:
114
114
115 $ hg ci --amend -u foo -d '1 0'
115 $ hg ci --amend -u foo -d '1 0'
116 saved backup bundle to $TESTTMP/.hg/strip-backup/ea6e356ff2ad-amend-backup.hg
116 saved backup bundle to $TESTTMP/.hg/strip-backup/ea6e356ff2ad-amend-backup.hg
117 $ echo a >> a
117 $ echo a >> a
118 $ hg ci --amend -u foo -d '1 0'
118 $ hg ci --amend -u foo -d '1 0'
119 saved backup bundle to $TESTTMP/.hg/strip-backup/377b91ce8b56-amend-backup.hg
119 saved backup bundle to $TESTTMP/.hg/strip-backup/377b91ce8b56-amend-backup.hg
120 $ hg log -r .
120 $ hg log -r .
121 changeset: 1:2c94e4a5756f
121 changeset: 1:2c94e4a5756f
122 tag: tip
122 tag: tip
123 user: foo
123 user: foo
124 date: Thu Jan 01 00:00:01 1970 +0000
124 date: Thu Jan 01 00:00:01 1970 +0000
125 summary: no changes, new message
125 summary: no changes, new message
126
126
127
127
128 Open editor with old commit message if a message isn't given otherwise:
128 Open editor with old commit message if a message isn't given otherwise:
129
129
130 $ cat > editor << '__EOF__'
130 $ cat > editor << '__EOF__'
131 > #!/bin/sh
131 > #!/bin/sh
132 > cat $1
132 > cat $1
133 > echo "another precious commit message" > "$1"
133 > echo "another precious commit message" > "$1"
134 > __EOF__
134 > __EOF__
135 $ chmod +x editor
135 $ chmod +x editor
136 $ HGEDITOR="'`pwd`'"/editor hg commit --amend -v
136 $ HGEDITOR="'`pwd`'"/editor hg commit --amend -v
137 amending changeset 2c94e4a5756f
137 amending changeset 2c94e4a5756f
138 copying changeset 2c94e4a5756f to ad120869acf0
138 copying changeset 2c94e4a5756f to ad120869acf0
139 no changes, new message
139 no changes, new message
140
140
141
141
142 HG: Enter commit message. Lines beginning with 'HG:' are removed.
142 HG: Enter commit message. Lines beginning with 'HG:' are removed.
143 HG: Leave message empty to abort commit.
143 HG: Leave message empty to abort commit.
144 HG: --
144 HG: --
145 HG: user: foo
145 HG: user: foo
146 HG: branch 'default'
146 HG: branch 'default'
147 HG: changed a
147 HG: changed a
148 a
148 a
149 stripping amended changeset 2c94e4a5756f
149 stripping amended changeset 2c94e4a5756f
150 1 changesets found
150 1 changesets found
151 saved backup bundle to $TESTTMP/.hg/strip-backup/2c94e4a5756f-amend-backup.hg
151 saved backup bundle to $TESTTMP/.hg/strip-backup/2c94e4a5756f-amend-backup.hg
152 1 changesets found
152 1 changesets found
153 adding branch
153 adding branch
154 adding changesets
154 adding changesets
155 adding manifests
155 adding manifests
156 adding file changes
156 adding file changes
157 added 1 changesets with 1 changes to 1 files
157 added 1 changesets with 1 changes to 1 files
158 committed changeset 1:ffb49186f961
158 committed changeset 1:ffb49186f961
159
159
160 Same, but with changes in working dir (different code path):
160 Same, but with changes in working dir (different code path):
161
161
162 $ echo a >> a
162 $ echo a >> a
163 $ HGEDITOR="'`pwd`'"/editor hg commit --amend -v
163 $ HGEDITOR="'`pwd`'"/editor hg commit --amend -v
164 amending changeset ffb49186f961
164 amending changeset ffb49186f961
165 another precious commit message
165 another precious commit message
166
166
167
167
168 HG: Enter commit message. Lines beginning with 'HG:' are removed.
168 HG: Enter commit message. Lines beginning with 'HG:' are removed.
169 HG: Leave message empty to abort commit.
169 HG: Leave message empty to abort commit.
170 HG: --
170 HG: --
171 HG: user: foo
171 HG: user: foo
172 HG: branch 'default'
172 HG: branch 'default'
173 HG: changed a
173 HG: changed a
174 a
174 a
175 copying changeset 27f3aacd3011 to ad120869acf0
175 copying changeset 27f3aacd3011 to ad120869acf0
176 a
176 a
177 stripping intermediate changeset 27f3aacd3011
177 stripping intermediate changeset 27f3aacd3011
178 stripping amended changeset ffb49186f961
178 stripping amended changeset ffb49186f961
179 2 changesets found
179 2 changesets found
180 saved backup bundle to $TESTTMP/.hg/strip-backup/ffb49186f961-amend-backup.hg
180 saved backup bundle to $TESTTMP/.hg/strip-backup/ffb49186f961-amend-backup.hg
181 1 changesets found
181 1 changesets found
182 adding branch
182 adding branch
183 adding changesets
183 adding changesets
184 adding manifests
184 adding manifests
185 adding file changes
185 adding file changes
186 added 1 changesets with 1 changes to 1 files
186 added 1 changesets with 1 changes to 1 files
187 committed changeset 1:fb6cca43446f
187 committed changeset 1:fb6cca43446f
188
188
189 $ rm editor
189 $ rm editor
190 $ hg log -r .
190 $ hg log -r .
191 changeset: 1:fb6cca43446f
191 changeset: 1:fb6cca43446f
192 tag: tip
192 tag: tip
193 user: foo
193 user: foo
194 date: Thu Jan 01 00:00:01 1970 +0000
194 date: Thu Jan 01 00:00:01 1970 +0000
195 summary: another precious commit message
195 summary: another precious commit message
196
196
197
197
198 Moving bookmarks, preserve active bookmark:
198 Moving bookmarks, preserve active bookmark:
199
199
200 $ hg book book1
200 $ hg book book1
201 $ hg book book2
201 $ hg book book2
202 $ hg ci --amend -m 'move bookmarks'
202 $ hg ci --amend -m 'move bookmarks'
203 saved backup bundle to $TESTTMP/.hg/strip-backup/fb6cca43446f-amend-backup.hg
203 saved backup bundle to $TESTTMP/.hg/strip-backup/fb6cca43446f-amend-backup.hg
204 $ hg book
204 $ hg book
205 book1 1:0cf1c7a51bcf
205 book1 1:0cf1c7a51bcf
206 * book2 1:0cf1c7a51bcf
206 * book2 1:0cf1c7a51bcf
207 $ echo a >> a
207 $ echo a >> a
208 $ hg ci --amend -m 'move bookmarks'
208 $ hg ci --amend -m 'move bookmarks'
209 saved backup bundle to $TESTTMP/.hg/strip-backup/0cf1c7a51bcf-amend-backup.hg
209 saved backup bundle to $TESTTMP/.hg/strip-backup/0cf1c7a51bcf-amend-backup.hg
210 $ hg book
210 $ hg book
211 book1 1:7344472bd951
211 book1 1:7344472bd951
212 * book2 1:7344472bd951
212 * book2 1:7344472bd951
213
213
214 $ echo '[defaults]' >> $HGRCPATH
214 $ echo '[defaults]' >> $HGRCPATH
215 $ echo "commit=-d '0 0'" >> $HGRCPATH
215 $ echo "commit=-d '0 0'" >> $HGRCPATH
216
216
217 Moving branches:
217 Moving branches:
218
218
219 $ hg branch foo
219 $ hg branch foo
220 marked working directory as branch foo
220 marked working directory as branch foo
221 (branches are permanent and global, did you want a bookmark?)
221 (branches are permanent and global, did you want a bookmark?)
222 $ echo a >> a
222 $ echo a >> a
223 $ hg ci -m 'branch foo'
223 $ hg ci -m 'branch foo'
224 $ hg branch default -f
224 $ hg branch default -f
225 marked working directory as branch default
225 marked working directory as branch default
226 (branches are permanent and global, did you want a bookmark?)
226 (branches are permanent and global, did you want a bookmark?)
227 $ hg ci --amend -m 'back to default'
227 $ hg ci --amend -m 'back to default'
228 saved backup bundle to $TESTTMP/.hg/strip-backup/1661ca36a2db-amend-backup.hg
228 saved backup bundle to $TESTTMP/.hg/strip-backup/1661ca36a2db-amend-backup.hg
229 $ hg branches
229 $ hg branches
230 default 2:f24ee5961967
230 default 2:f24ee5961967
231
231
232 Close branch:
232 Close branch:
233
233
234 $ hg up -q 0
234 $ hg up -q 0
235 $ echo b >> b
235 $ echo b >> b
236 $ hg branch foo
236 $ hg branch foo
237 marked working directory as branch foo
237 marked working directory as branch foo
238 (branches are permanent and global, did you want a bookmark?)
238 (branches are permanent and global, did you want a bookmark?)
239 $ hg ci -Am 'fork'
239 $ hg ci -Am 'fork'
240 adding b
240 adding b
241 $ echo b >> b
241 $ echo b >> b
242 $ hg ci -mb
242 $ hg ci -mb
243 $ hg ci --amend --close-branch -m 'closing branch foo'
243 $ hg ci --amend --close-branch -m 'closing branch foo'
244 saved backup bundle to $TESTTMP/.hg/strip-backup/c962248fa264-amend-backup.hg
244 saved backup bundle to $TESTTMP/.hg/strip-backup/c962248fa264-amend-backup.hg
245
245
246 Same thing, different code path:
246 Same thing, different code path:
247
247
248 $ echo b >> b
248 $ echo b >> b
249 $ hg ci -m 'reopen branch'
249 $ hg ci -m 'reopen branch'
250 created new head
250 reopening closed branch head 4
251 reopening closed branch head 4
251 $ echo b >> b
252 $ echo b >> b
252 $ hg ci --amend --close-branch
253 $ hg ci --amend --close-branch
253 saved backup bundle to $TESTTMP/.hg/strip-backup/5e302dcc12b8-amend-backup.hg
254 saved backup bundle to $TESTTMP/.hg/strip-backup/5e302dcc12b8-amend-backup.hg
254 $ hg branches
255 $ hg branches
255 default 2:f24ee5961967
256 default 2:f24ee5961967
256
257
257 Refuse to amend merges:
258 Refuse to amend merges:
258
259
259 $ hg up -q default
260 $ hg up -q default
260 $ hg merge foo
261 $ hg merge foo
261 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
262 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
262 (branch merge, don't forget to commit)
263 (branch merge, don't forget to commit)
263 $ hg ci --amend
264 $ hg ci --amend
264 abort: cannot amend while merging
265 abort: cannot amend while merging
265 [255]
266 [255]
266 $ hg ci -m 'merge'
267 $ hg ci -m 'merge'
267 $ hg ci --amend
268 $ hg ci --amend
268 abort: cannot amend merge changesets
269 abort: cannot amend merge changesets
269 [255]
270 [255]
270
271
271 Follow copies/renames:
272 Follow copies/renames:
272
273
273 $ hg mv b c
274 $ hg mv b c
274 $ hg ci -m 'b -> c'
275 $ hg ci -m 'b -> c'
275 $ hg mv c d
276 $ hg mv c d
276 $ hg ci --amend -m 'b -> d'
277 $ hg ci --amend -m 'b -> d'
277 saved backup bundle to $TESTTMP/.hg/strip-backup/9c207120aa98-amend-backup.hg
278 saved backup bundle to $TESTTMP/.hg/strip-backup/9c207120aa98-amend-backup.hg
278 $ hg st --rev '.^' --copies d
279 $ hg st --rev '.^' --copies d
279 A d
280 A d
280 b
281 b
281 $ hg cp d e
282 $ hg cp d e
282 $ hg ci -m 'e = d'
283 $ hg ci -m 'e = d'
283 $ hg cp e f
284 $ hg cp e f
284 $ hg ci --amend -m 'f = d'
285 $ hg ci --amend -m 'f = d'
285 saved backup bundle to $TESTTMP/.hg/strip-backup/fda2b3b27b22-amend-backup.hg
286 saved backup bundle to $TESTTMP/.hg/strip-backup/fda2b3b27b22-amend-backup.hg
286 $ hg st --rev '.^' --copies f
287 $ hg st --rev '.^' --copies f
287 A f
288 A f
288 d
289 d
289
290
290 $ mv f f.orig
291 $ mv f f.orig
291 $ hg rm -A f
292 $ hg rm -A f
292 $ hg ci -m removef
293 $ hg ci -m removef
293 $ hg cp a f
294 $ hg cp a f
294 $ mv f.orig f
295 $ mv f.orig f
295 $ hg ci --amend -m replacef
296 $ hg ci --amend -m replacef
296 saved backup bundle to $TESTTMP/.hg/strip-backup/20a7413547f9-amend-backup.hg
297 saved backup bundle to $TESTTMP/.hg/strip-backup/20a7413547f9-amend-backup.hg
297 $ hg st --change . --copies
298 $ hg st --change . --copies
298 $ hg log -r . --template "{file_copies}\n"
299 $ hg log -r . --template "{file_copies}\n"
299
300
300
301
301 Move added file (issue3410):
302 Move added file (issue3410):
302
303
303 $ echo g >> g
304 $ echo g >> g
304 $ hg ci -Am g
305 $ hg ci -Am g
305 adding g
306 adding g
306 $ hg mv g h
307 $ hg mv g h
307 $ hg ci --amend
308 $ hg ci --amend
308 saved backup bundle to $TESTTMP/.hg/strip-backup/5daa77a5d616-amend-backup.hg
309 saved backup bundle to $TESTTMP/.hg/strip-backup/5daa77a5d616-amend-backup.hg
309 $ hg st --change . --copies h
310 $ hg st --change . --copies h
310 A h
311 A h
311 $ hg log -r . --template "{file_copies}\n"
312 $ hg log -r . --template "{file_copies}\n"
312
313
313
314
314 Can't rollback an amend:
315 Can't rollback an amend:
315
316
316 $ hg rollback
317 $ hg rollback
317 no rollback information available
318 no rollback information available
318 [1]
319 [1]
319
320
320 Preserve extra dict (issue3430):
321 Preserve extra dict (issue3430):
321
322
322 $ hg branch a
323 $ hg branch a
323 marked working directory as branch a
324 marked working directory as branch a
324 (branches are permanent and global, did you want a bookmark?)
325 (branches are permanent and global, did you want a bookmark?)
325 $ echo a >> a
326 $ echo a >> a
326 $ hg ci -ma
327 $ hg ci -ma
327 $ hg ci --amend -m "a'"
328 $ hg ci --amend -m "a'"
328 saved backup bundle to $TESTTMP/.hg/strip-backup/167f8e3031df-amend-backup.hg
329 saved backup bundle to $TESTTMP/.hg/strip-backup/167f8e3031df-amend-backup.hg
329 $ hg log -r . --template "{branch}\n"
330 $ hg log -r . --template "{branch}\n"
330 a
331 a
331 $ hg ci --amend -m "a''"
332 $ hg ci --amend -m "a''"
332 saved backup bundle to $TESTTMP/.hg/strip-backup/ceac1a44c806-amend-backup.hg
333 saved backup bundle to $TESTTMP/.hg/strip-backup/ceac1a44c806-amend-backup.hg
333 $ hg log -r . --template "{branch}\n"
334 $ hg log -r . --template "{branch}\n"
334 a
335 a
335
336
336 Also preserve other entries in the dict that are in the old commit,
337 Also preserve other entries in the dict that are in the old commit,
337 first graft something so there's an additional entry:
338 first graft something so there's an additional entry:
338
339
339 $ hg up 0 -q
340 $ hg up 0 -q
340 $ echo z > z
341 $ echo z > z
341 $ hg ci -Am 'fork'
342 $ hg ci -Am 'fork'
342 adding z
343 adding z
343 created new head
344 created new head
344 $ hg up 11
345 $ hg up 11
345 5 files updated, 0 files merged, 1 files removed, 0 files unresolved
346 5 files updated, 0 files merged, 1 files removed, 0 files unresolved
346 $ hg graft 12
347 $ hg graft 12
347 grafting revision 12
348 grafting revision 12
348 $ hg ci --amend -m 'graft amend'
349 $ hg ci --amend -m 'graft amend'
349 saved backup bundle to $TESTTMP/.hg/strip-backup/18a5124daf7a-amend-backup.hg
350 saved backup bundle to $TESTTMP/.hg/strip-backup/18a5124daf7a-amend-backup.hg
350 $ hg log -r . --debug | grep extra
351 $ hg log -r . --debug | grep extra
351 extra: branch=a
352 extra: branch=a
352 extra: source=2647734878ef0236dda712fae9c1651cf694ea8a
353 extra: source=2647734878ef0236dda712fae9c1651cf694ea8a
@@ -1,124 +1,125 b''
1 $ branches=.hg/cache/branchheads
1 $ branches=.hg/cache/branchheads
2 $ echo '[extensions]' >> $HGRCPATH
2 $ echo '[extensions]' >> $HGRCPATH
3 $ echo 'mq =' >> $HGRCPATH
3 $ echo 'mq =' >> $HGRCPATH
4
4
5 $ show_branch_cache()
5 $ show_branch_cache()
6 > {
6 > {
7 > # force cache (re)generation
7 > # force cache (re)generation
8 > hg log -r does-not-exist 2> /dev/null
8 > hg log -r does-not-exist 2> /dev/null
9 > hg log -r tip --template 'tip: {rev}\n'
9 > hg log -r tip --template 'tip: {rev}\n'
10 > if [ -f $branches ]; then
10 > if [ -f $branches ]; then
11 > sort $branches
11 > sort $branches
12 > else
12 > else
13 > echo No branch cache
13 > echo No branch cache
14 > fi
14 > fi
15 > if [ "$1" = 1 ]; then
15 > if [ "$1" = 1 ]; then
16 > for b in foo bar; do
16 > for b in foo bar; do
17 > hg log -r $b --template "branch $b: "'{rev}\n'
17 > hg log -r $b --template "branch $b: "'{rev}\n'
18 > done
18 > done
19 > fi
19 > fi
20 > }
20 > }
21
21
22 $ hg init a
22 $ hg init a
23 $ cd a
23 $ cd a
24 $ hg qinit -c
24 $ hg qinit -c
25
25
26
26
27 mq patch on an empty repo
27 mq patch on an empty repo
28
28
29 $ hg qnew -d '0 0' p1
29 $ hg qnew -d '0 0' p1
30 $ show_branch_cache
30 $ show_branch_cache
31 tip: 0
31 tip: 0
32 No branch cache
32 No branch cache
33
33
34 $ echo > pfile
34 $ echo > pfile
35 $ hg add pfile
35 $ hg add pfile
36 $ hg qrefresh -m 'patch 1'
36 $ hg qrefresh -m 'patch 1'
37 $ show_branch_cache
37 $ show_branch_cache
38 tip: 0
38 tip: 0
39 No branch cache
39 d986d5caac23a7d44a46efc0ddaf5eb9665844cf 0
40 d986d5caac23a7d44a46efc0ddaf5eb9665844cf default
40
41
41 some regular revisions
42 some regular revisions
42
43
43 $ hg qpop
44 $ hg qpop
44 popping p1
45 popping p1
45 patch queue now empty
46 patch queue now empty
46 $ echo foo > foo
47 $ echo foo > foo
47 $ hg add foo
48 $ hg add foo
48 $ echo foo > .hg/branch
49 $ echo foo > .hg/branch
49 $ hg ci -m 'branch foo'
50 $ hg ci -m 'branch foo'
50
51
51 $ echo bar > bar
52 $ echo bar > bar
52 $ hg add bar
53 $ hg add bar
53 $ echo bar > .hg/branch
54 $ echo bar > .hg/branch
54 $ hg ci -m 'branch bar'
55 $ hg ci -m 'branch bar'
55 $ show_branch_cache
56 $ show_branch_cache
56 tip: 1
57 tip: 1
57 c229711f16da3d7591f89b1b8d963b79bda22714 1
58 c229711f16da3d7591f89b1b8d963b79bda22714 1
58 c229711f16da3d7591f89b1b8d963b79bda22714 bar
59 c229711f16da3d7591f89b1b8d963b79bda22714 bar
59 dc25e3827021582e979f600811852e36cbe57341 foo
60 dc25e3827021582e979f600811852e36cbe57341 foo
60
61
61 add some mq patches
62 add some mq patches
62
63
63 $ hg qpush
64 $ hg qpush
64 applying p1
65 applying p1
65 now at: p1
66 now at: p1
66 $ show_branch_cache
67 $ show_branch_cache
67 tip: 2
68 tip: 2
68 c229711f16da3d7591f89b1b8d963b79bda22714 1
69 c229711f16da3d7591f89b1b8d963b79bda22714 1
69 c229711f16da3d7591f89b1b8d963b79bda22714 bar
70 c229711f16da3d7591f89b1b8d963b79bda22714 bar
70 dc25e3827021582e979f600811852e36cbe57341 foo
71 dc25e3827021582e979f600811852e36cbe57341 foo
71
72
72 $ hg qnew -d '0 0' p2
73 $ hg qnew -d '0 0' p2
73 $ echo foo > .hg/branch
74 $ echo foo > .hg/branch
74 $ echo foo2 >> foo
75 $ echo foo2 >> foo
75 $ hg qrefresh -m 'patch 2'
76 $ hg qrefresh -m 'patch 2'
76 $ show_branch_cache 1
77 $ show_branch_cache 1
77 tip: 3
78 tip: 3
78 c229711f16da3d7591f89b1b8d963b79bda22714 1
79 982611f6955f9c48d3365decea203217c945ef0d 2
79 c229711f16da3d7591f89b1b8d963b79bda22714 bar
80 982611f6955f9c48d3365decea203217c945ef0d bar
80 dc25e3827021582e979f600811852e36cbe57341 foo
81 dc25e3827021582e979f600811852e36cbe57341 foo
81 branch foo: 3
82 branch foo: 3
82 branch bar: 2
83 branch bar: 2
83
84
84 removing the cache
85 removing the cache
85
86
86 $ rm $branches
87 $ rm $branches
87 $ show_branch_cache 1
88 $ show_branch_cache 1
88 tip: 3
89 tip: 3
89 c229711f16da3d7591f89b1b8d963b79bda22714 1
90 c229711f16da3d7591f89b1b8d963b79bda22714 1
90 c229711f16da3d7591f89b1b8d963b79bda22714 bar
91 c229711f16da3d7591f89b1b8d963b79bda22714 bar
91 dc25e3827021582e979f600811852e36cbe57341 foo
92 dc25e3827021582e979f600811852e36cbe57341 foo
92 branch foo: 3
93 branch foo: 3
93 branch bar: 2
94 branch bar: 2
94
95
95 importing rev 1 (the cache now ends in one of the patches)
96 importing rev 1 (the cache now ends in one of the patches)
96
97
97 $ hg qimport -r 1 -n p0
98 $ hg qimport -r 1 -n p0
98 $ show_branch_cache 1
99 $ show_branch_cache 1
99 tip: 3
100 tip: 3
100 c229711f16da3d7591f89b1b8d963b79bda22714 1
101 c229711f16da3d7591f89b1b8d963b79bda22714 1
101 c229711f16da3d7591f89b1b8d963b79bda22714 bar
102 c229711f16da3d7591f89b1b8d963b79bda22714 bar
102 dc25e3827021582e979f600811852e36cbe57341 foo
103 dc25e3827021582e979f600811852e36cbe57341 foo
103 branch foo: 3
104 branch foo: 3
104 branch bar: 2
105 branch bar: 2
105 $ hg log -r qbase --template 'qbase: {rev}\n'
106 $ hg log -r qbase --template 'qbase: {rev}\n'
106 qbase: 1
107 qbase: 1
107
108
108 detect an invalid cache
109 detect an invalid cache
109
110
110 $ hg qpop -a
111 $ hg qpop -a
111 popping p2
112 popping p2
112 popping p1
113 popping p1
113 popping p0
114 popping p0
114 patch queue now empty
115 patch queue now empty
115 $ hg qpush -a
116 $ hg qpush -a
116 applying p0
117 applying p0
117 applying p1
118 applying p1
118 applying p2
119 applying p2
119 now at: p2
120 now at: p2
120 $ show_branch_cache
121 $ show_branch_cache
121 tip: 3
122 tip: 3
122 dc25e3827021582e979f600811852e36cbe57341 0
123 dc25e3827021582e979f600811852e36cbe57341 0
123 dc25e3827021582e979f600811852e36cbe57341 foo
124 dc25e3827021582e979f600811852e36cbe57341 foo
124
125
General Comments 0
You need to be logged in to leave comments. Login now