branchcache: backout 0311a6abd38a
Matt Mackall
r16745:27b2e182 default
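The hunk below backs out the branch cache changes from 0311a6abd38a: it drops the hasnode()-based filtering of candidate heads (and the related stale-branch cleanup) that that changeset had added, keeping the earlier reachable()-based pruning in _updatebranchcache, and it also removes the node-existence check in _readbranchcache. As a rough illustration of the pruning idea only, and not of Mercurial's actual API, here is a minimal sketch that drops candidate branch heads which are ancestors of another candidate; every name in it (ancestors, prune_heads, the toy parents dict) is hypothetical.

# Hypothetical sketch: keep only candidate heads that are not ancestors of
# another candidate. This mirrors the general idea behind the reachability-based
# pruning that the backout leaves in place in _updatebranchcache.

def ancestors(node, parents):
    """Return the set of ancestors of node (excluding node itself)."""
    seen = set()
    stack = list(parents.get(node, ()))
    while stack:
        n = stack.pop()
        if n not in seen:
            seen.add(n)
            stack.extend(parents.get(n, ()))
    return seen

def prune_heads(candidates, parents):
    """Drop candidates that are reachable from (ancestors of) another candidate."""
    heads = set(candidates)
    for node in candidates:
        heads -= ancestors(node, parents)
    return heads

if __name__ == '__main__':
    # toy history: 1 <- 2 <- 3 and 2 <- 4, so the real heads are 3 and 4
    parents = {2: [1], 3: [2], 4: [2]}
    print(sorted(prune_heads([1, 2, 3, 4], parents)))  # [3, 4]

In the hunk itself the equivalent filtering is done with self.changelog.reachable(latest, minbhnode), walking down from each new node only as far as the lowest-revision candidate head.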
@@ -1,2410 +1,2364 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
22 class storecache(filecache):
22 class storecache(filecache):
23 """filecache for files in the store"""
23 """filecache for files in the store"""
24 def join(self, obj, fname):
24 def join(self, obj, fname):
25 return obj.sjoin(fname)
25 return obj.sjoin(fname)
26
26
27 class localrepository(repo.repository):
27 class localrepository(repo.repository):
28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
29 'known', 'getbundle'))
29 'known', 'getbundle'))
30 supportedformats = set(('revlogv1', 'generaldelta'))
30 supportedformats = set(('revlogv1', 'generaldelta'))
31 supported = supportedformats | set(('store', 'fncache', 'shared',
31 supported = supportedformats | set(('store', 'fncache', 'shared',
32 'dotencode'))
32 'dotencode'))
33
33
34 def __init__(self, baseui, path=None, create=False):
34 def __init__(self, baseui, path=None, create=False):
35 repo.repository.__init__(self)
35 repo.repository.__init__(self)
36 self.root = os.path.realpath(util.expandpath(path))
36 self.root = os.path.realpath(util.expandpath(path))
37 self.path = os.path.join(self.root, ".hg")
37 self.path = os.path.join(self.root, ".hg")
38 self.origroot = path
38 self.origroot = path
39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
40 self.opener = scmutil.opener(self.path)
40 self.opener = scmutil.opener(self.path)
41 self.wopener = scmutil.opener(self.root)
41 self.wopener = scmutil.opener(self.root)
42 self.baseui = baseui
42 self.baseui = baseui
43 self.ui = baseui.copy()
43 self.ui = baseui.copy()
44 # A list of callbacks to shape the phase if no data were found.
44 # A list of callbacks to shape the phase if no data were found.
45 # Callbacks are in the form: func(repo, roots) --> processed root.
45 # Callbacks are in the form: func(repo, roots) --> processed root.
46 # This list is to be filled by extensions during repo setup
46 # This list is to be filled by extensions during repo setup
47 self._phasedefaults = []
47 self._phasedefaults = []
48
48
49 try:
49 try:
50 self.ui.readconfig(self.join("hgrc"), self.root)
50 self.ui.readconfig(self.join("hgrc"), self.root)
51 extensions.loadall(self.ui)
51 extensions.loadall(self.ui)
52 except IOError:
52 except IOError:
53 pass
53 pass
54
54
55 if not os.path.isdir(self.path):
55 if not os.path.isdir(self.path):
56 if create:
56 if create:
57 if not os.path.exists(path):
57 if not os.path.exists(path):
58 util.makedirs(path)
58 util.makedirs(path)
59 util.makedir(self.path, notindexed=True)
59 util.makedir(self.path, notindexed=True)
60 requirements = ["revlogv1"]
60 requirements = ["revlogv1"]
61 if self.ui.configbool('format', 'usestore', True):
61 if self.ui.configbool('format', 'usestore', True):
62 os.mkdir(os.path.join(self.path, "store"))
62 os.mkdir(os.path.join(self.path, "store"))
63 requirements.append("store")
63 requirements.append("store")
64 if self.ui.configbool('format', 'usefncache', True):
64 if self.ui.configbool('format', 'usefncache', True):
65 requirements.append("fncache")
65 requirements.append("fncache")
66 if self.ui.configbool('format', 'dotencode', True):
66 if self.ui.configbool('format', 'dotencode', True):
67 requirements.append('dotencode')
67 requirements.append('dotencode')
68 # create an invalid changelog
68 # create an invalid changelog
69 self.opener.append(
69 self.opener.append(
70 "00changelog.i",
70 "00changelog.i",
71 '\0\0\0\2' # represents revlogv2
71 '\0\0\0\2' # represents revlogv2
72 ' dummy changelog to prevent using the old repo layout'
72 ' dummy changelog to prevent using the old repo layout'
73 )
73 )
74 if self.ui.configbool('format', 'generaldelta', False):
74 if self.ui.configbool('format', 'generaldelta', False):
75 requirements.append("generaldelta")
75 requirements.append("generaldelta")
76 requirements = set(requirements)
76 requirements = set(requirements)
77 else:
77 else:
78 raise error.RepoError(_("repository %s not found") % path)
78 raise error.RepoError(_("repository %s not found") % path)
79 elif create:
79 elif create:
80 raise error.RepoError(_("repository %s already exists") % path)
80 raise error.RepoError(_("repository %s already exists") % path)
81 else:
81 else:
82 try:
82 try:
83 requirements = scmutil.readrequires(self.opener, self.supported)
83 requirements = scmutil.readrequires(self.opener, self.supported)
84 except IOError, inst:
84 except IOError, inst:
85 if inst.errno != errno.ENOENT:
85 if inst.errno != errno.ENOENT:
86 raise
86 raise
87 requirements = set()
87 requirements = set()
88
88
89 self.sharedpath = self.path
89 self.sharedpath = self.path
90 try:
90 try:
91 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
91 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
92 if not os.path.exists(s):
92 if not os.path.exists(s):
93 raise error.RepoError(
93 raise error.RepoError(
94 _('.hg/sharedpath points to nonexistent directory %s') % s)
94 _('.hg/sharedpath points to nonexistent directory %s') % s)
95 self.sharedpath = s
95 self.sharedpath = s
96 except IOError, inst:
96 except IOError, inst:
97 if inst.errno != errno.ENOENT:
97 if inst.errno != errno.ENOENT:
98 raise
98 raise
99
99
100 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
100 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
101 self.spath = self.store.path
101 self.spath = self.store.path
102 self.sopener = self.store.opener
102 self.sopener = self.store.opener
103 self.sjoin = self.store.join
103 self.sjoin = self.store.join
104 self.opener.createmode = self.store.createmode
104 self.opener.createmode = self.store.createmode
105 self._applyrequirements(requirements)
105 self._applyrequirements(requirements)
106 if create:
106 if create:
107 self._writerequirements()
107 self._writerequirements()
108
108
109
109
110 self._branchcache = None
110 self._branchcache = None
111 self._branchcachetip = None
111 self._branchcachetip = None
112 self.filterpats = {}
112 self.filterpats = {}
113 self._datafilters = {}
113 self._datafilters = {}
114 self._transref = self._lockref = self._wlockref = None
114 self._transref = self._lockref = self._wlockref = None
115
115
116 # A cache for various files under .hg/ that tracks file changes,
116 # A cache for various files under .hg/ that tracks file changes,
117 # (used by the filecache decorator)
117 # (used by the filecache decorator)
118 #
118 #
119 # Maps a property name to its util.filecacheentry
119 # Maps a property name to its util.filecacheentry
120 self._filecache = {}
120 self._filecache = {}
121
121
122 def _applyrequirements(self, requirements):
122 def _applyrequirements(self, requirements):
123 self.requirements = requirements
123 self.requirements = requirements
124 openerreqs = set(('revlogv1', 'generaldelta'))
124 openerreqs = set(('revlogv1', 'generaldelta'))
125 self.sopener.options = dict((r, 1) for r in requirements
125 self.sopener.options = dict((r, 1) for r in requirements
126 if r in openerreqs)
126 if r in openerreqs)
127
127
128 def _writerequirements(self):
128 def _writerequirements(self):
129 reqfile = self.opener("requires", "w")
129 reqfile = self.opener("requires", "w")
130 for r in self.requirements:
130 for r in self.requirements:
131 reqfile.write("%s\n" % r)
131 reqfile.write("%s\n" % r)
132 reqfile.close()
132 reqfile.close()
133
133
134 def _checknested(self, path):
134 def _checknested(self, path):
135 """Determine if path is a legal nested repository."""
135 """Determine if path is a legal nested repository."""
136 if not path.startswith(self.root):
136 if not path.startswith(self.root):
137 return False
137 return False
138 subpath = path[len(self.root) + 1:]
138 subpath = path[len(self.root) + 1:]
139 normsubpath = util.pconvert(subpath)
139 normsubpath = util.pconvert(subpath)
140
140
141 # XXX: Checking against the current working copy is wrong in
141 # XXX: Checking against the current working copy is wrong in
142 # the sense that it can reject things like
142 # the sense that it can reject things like
143 #
143 #
144 # $ hg cat -r 10 sub/x.txt
144 # $ hg cat -r 10 sub/x.txt
145 #
145 #
146 # if sub/ is no longer a subrepository in the working copy
146 # if sub/ is no longer a subrepository in the working copy
147 # parent revision.
147 # parent revision.
148 #
148 #
149 # However, it can of course also allow things that would have
149 # However, it can of course also allow things that would have
150 # been rejected before, such as the above cat command if sub/
150 # been rejected before, such as the above cat command if sub/
151 # is a subrepository now, but was a normal directory before.
151 # is a subrepository now, but was a normal directory before.
152 # The old path auditor would have rejected by mistake since it
152 # The old path auditor would have rejected by mistake since it
153 # panics when it sees sub/.hg/.
153 # panics when it sees sub/.hg/.
154 #
154 #
155 # All in all, checking against the working copy seems sensible
155 # All in all, checking against the working copy seems sensible
156 # since we want to prevent access to nested repositories on
156 # since we want to prevent access to nested repositories on
157 # the filesystem *now*.
157 # the filesystem *now*.
158 ctx = self[None]
158 ctx = self[None]
159 parts = util.splitpath(subpath)
159 parts = util.splitpath(subpath)
160 while parts:
160 while parts:
161 prefix = '/'.join(parts)
161 prefix = '/'.join(parts)
162 if prefix in ctx.substate:
162 if prefix in ctx.substate:
163 if prefix == normsubpath:
163 if prefix == normsubpath:
164 return True
164 return True
165 else:
165 else:
166 sub = ctx.sub(prefix)
166 sub = ctx.sub(prefix)
167 return sub.checknested(subpath[len(prefix) + 1:])
167 return sub.checknested(subpath[len(prefix) + 1:])
168 else:
168 else:
169 parts.pop()
169 parts.pop()
170 return False
170 return False
171
171
172 @filecache('bookmarks')
172 @filecache('bookmarks')
173 def _bookmarks(self):
173 def _bookmarks(self):
174 return bookmarks.read(self)
174 return bookmarks.read(self)
175
175
176 @filecache('bookmarks.current')
176 @filecache('bookmarks.current')
177 def _bookmarkcurrent(self):
177 def _bookmarkcurrent(self):
178 return bookmarks.readcurrent(self)
178 return bookmarks.readcurrent(self)
179
179
180 def _writebookmarks(self, marks):
180 def _writebookmarks(self, marks):
181 bookmarks.write(self)
181 bookmarks.write(self)
182
182
183 def bookmarkheads(self, bookmark):
183 def bookmarkheads(self, bookmark):
184 name = bookmark.split('@', 1)[0]
184 name = bookmark.split('@', 1)[0]
185 heads = []
185 heads = []
186 for mark, n in self._bookmarks.iteritems():
186 for mark, n in self._bookmarks.iteritems():
187 if mark.split('@', 1)[0] == name:
187 if mark.split('@', 1)[0] == name:
188 heads.append(n)
188 heads.append(n)
189 return heads
189 return heads
190
190
191 @storecache('phaseroots')
191 @storecache('phaseroots')
192 def _phasecache(self):
192 def _phasecache(self):
193 return phases.phasecache(self, self._phasedefaults)
193 return phases.phasecache(self, self._phasedefaults)
194
194
195 @storecache('00changelog.i')
195 @storecache('00changelog.i')
196 def changelog(self):
196 def changelog(self):
197 c = changelog.changelog(self.sopener)
197 c = changelog.changelog(self.sopener)
198 if 'HG_PENDING' in os.environ:
198 if 'HG_PENDING' in os.environ:
199 p = os.environ['HG_PENDING']
199 p = os.environ['HG_PENDING']
200 if p.startswith(self.root):
200 if p.startswith(self.root):
201 c.readpending('00changelog.i.a')
201 c.readpending('00changelog.i.a')
202 return c
202 return c
203
203
204 @storecache('00manifest.i')
204 @storecache('00manifest.i')
205 def manifest(self):
205 def manifest(self):
206 return manifest.manifest(self.sopener)
206 return manifest.manifest(self.sopener)
207
207
208 @filecache('dirstate')
208 @filecache('dirstate')
209 def dirstate(self):
209 def dirstate(self):
210 warned = [0]
210 warned = [0]
211 def validate(node):
211 def validate(node):
212 try:
212 try:
213 self.changelog.rev(node)
213 self.changelog.rev(node)
214 return node
214 return node
215 except error.LookupError:
215 except error.LookupError:
216 if not warned[0]:
216 if not warned[0]:
217 warned[0] = True
217 warned[0] = True
218 self.ui.warn(_("warning: ignoring unknown"
218 self.ui.warn(_("warning: ignoring unknown"
219 " working parent %s!\n") % short(node))
219 " working parent %s!\n") % short(node))
220 return nullid
220 return nullid
221
221
222 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
222 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
223
223
224 def __getitem__(self, changeid):
224 def __getitem__(self, changeid):
225 if changeid is None:
225 if changeid is None:
226 return context.workingctx(self)
226 return context.workingctx(self)
227 return context.changectx(self, changeid)
227 return context.changectx(self, changeid)
228
228
229 def __contains__(self, changeid):
229 def __contains__(self, changeid):
230 try:
230 try:
231 return bool(self.lookup(changeid))
231 return bool(self.lookup(changeid))
232 except error.RepoLookupError:
232 except error.RepoLookupError:
233 return False
233 return False
234
234
235 def __nonzero__(self):
235 def __nonzero__(self):
236 return True
236 return True
237
237
238 def __len__(self):
238 def __len__(self):
239 return len(self.changelog)
239 return len(self.changelog)
240
240
241 def __iter__(self):
241 def __iter__(self):
242 for i in xrange(len(self)):
242 for i in xrange(len(self)):
243 yield i
243 yield i
244
244
245 def revs(self, expr, *args):
245 def revs(self, expr, *args):
246 '''Return a list of revisions matching the given revset'''
246 '''Return a list of revisions matching the given revset'''
247 expr = revset.formatspec(expr, *args)
247 expr = revset.formatspec(expr, *args)
248 m = revset.match(None, expr)
248 m = revset.match(None, expr)
249 return [r for r in m(self, range(len(self)))]
249 return [r for r in m(self, range(len(self)))]
250
250
251 def set(self, expr, *args):
251 def set(self, expr, *args):
252 '''
252 '''
253 Yield a context for each matching revision, after doing arg
253 Yield a context for each matching revision, after doing arg
254 replacement via revset.formatspec
254 replacement via revset.formatspec
255 '''
255 '''
256 for r in self.revs(expr, *args):
256 for r in self.revs(expr, *args):
257 yield self[r]
257 yield self[r]
258
258
259 def url(self):
259 def url(self):
260 return 'file:' + self.root
260 return 'file:' + self.root
261
261
262 def hook(self, name, throw=False, **args):
262 def hook(self, name, throw=False, **args):
263 return hook.hook(self.ui, self, name, throw, **args)
263 return hook.hook(self.ui, self, name, throw, **args)
264
264
265 tag_disallowed = ':\r\n'
265 tag_disallowed = ':\r\n'
266
266
267 def _tag(self, names, node, message, local, user, date, extra={}):
267 def _tag(self, names, node, message, local, user, date, extra={}):
268 if isinstance(names, str):
268 if isinstance(names, str):
269 allchars = names
269 allchars = names
270 names = (names,)
270 names = (names,)
271 else:
271 else:
272 allchars = ''.join(names)
272 allchars = ''.join(names)
273 for c in self.tag_disallowed:
273 for c in self.tag_disallowed:
274 if c in allchars:
274 if c in allchars:
275 raise util.Abort(_('%r cannot be used in a tag name') % c)
275 raise util.Abort(_('%r cannot be used in a tag name') % c)
276
276
277 branches = self.branchmap()
277 branches = self.branchmap()
278 for name in names:
278 for name in names:
279 self.hook('pretag', throw=True, node=hex(node), tag=name,
279 self.hook('pretag', throw=True, node=hex(node), tag=name,
280 local=local)
280 local=local)
281 if name in branches:
281 if name in branches:
282 self.ui.warn(_("warning: tag %s conflicts with existing"
282 self.ui.warn(_("warning: tag %s conflicts with existing"
283 " branch name\n") % name)
283 " branch name\n") % name)
284
284
285 def writetags(fp, names, munge, prevtags):
285 def writetags(fp, names, munge, prevtags):
286 fp.seek(0, 2)
286 fp.seek(0, 2)
287 if prevtags and prevtags[-1] != '\n':
287 if prevtags and prevtags[-1] != '\n':
288 fp.write('\n')
288 fp.write('\n')
289 for name in names:
289 for name in names:
290 m = munge and munge(name) or name
290 m = munge and munge(name) or name
291 if (self._tagscache.tagtypes and
291 if (self._tagscache.tagtypes and
292 name in self._tagscache.tagtypes):
292 name in self._tagscache.tagtypes):
293 old = self.tags().get(name, nullid)
293 old = self.tags().get(name, nullid)
294 fp.write('%s %s\n' % (hex(old), m))
294 fp.write('%s %s\n' % (hex(old), m))
295 fp.write('%s %s\n' % (hex(node), m))
295 fp.write('%s %s\n' % (hex(node), m))
296 fp.close()
296 fp.close()
297
297
298 prevtags = ''
298 prevtags = ''
299 if local:
299 if local:
300 try:
300 try:
301 fp = self.opener('localtags', 'r+')
301 fp = self.opener('localtags', 'r+')
302 except IOError:
302 except IOError:
303 fp = self.opener('localtags', 'a')
303 fp = self.opener('localtags', 'a')
304 else:
304 else:
305 prevtags = fp.read()
305 prevtags = fp.read()
306
306
307 # local tags are stored in the current charset
307 # local tags are stored in the current charset
308 writetags(fp, names, None, prevtags)
308 writetags(fp, names, None, prevtags)
309 for name in names:
309 for name in names:
310 self.hook('tag', node=hex(node), tag=name, local=local)
310 self.hook('tag', node=hex(node), tag=name, local=local)
311 return
311 return
312
312
313 try:
313 try:
314 fp = self.wfile('.hgtags', 'rb+')
314 fp = self.wfile('.hgtags', 'rb+')
315 except IOError, e:
315 except IOError, e:
316 if e.errno != errno.ENOENT:
316 if e.errno != errno.ENOENT:
317 raise
317 raise
318 fp = self.wfile('.hgtags', 'ab')
318 fp = self.wfile('.hgtags', 'ab')
319 else:
319 else:
320 prevtags = fp.read()
320 prevtags = fp.read()
321
321
322 # committed tags are stored in UTF-8
322 # committed tags are stored in UTF-8
323 writetags(fp, names, encoding.fromlocal, prevtags)
323 writetags(fp, names, encoding.fromlocal, prevtags)
324
324
325 fp.close()
325 fp.close()
326
326
327 self.invalidatecaches()
327 self.invalidatecaches()
328
328
329 if '.hgtags' not in self.dirstate:
329 if '.hgtags' not in self.dirstate:
330 self[None].add(['.hgtags'])
330 self[None].add(['.hgtags'])
331
331
332 m = matchmod.exact(self.root, '', ['.hgtags'])
332 m = matchmod.exact(self.root, '', ['.hgtags'])
333 tagnode = self.commit(message, user, date, extra=extra, match=m)
333 tagnode = self.commit(message, user, date, extra=extra, match=m)
334
334
335 for name in names:
335 for name in names:
336 self.hook('tag', node=hex(node), tag=name, local=local)
336 self.hook('tag', node=hex(node), tag=name, local=local)
337
337
338 return tagnode
338 return tagnode
339
339
340 def tag(self, names, node, message, local, user, date):
340 def tag(self, names, node, message, local, user, date):
341 '''tag a revision with one or more symbolic names.
341 '''tag a revision with one or more symbolic names.
342
342
343 names is a list of strings or, when adding a single tag, names may be a
343 names is a list of strings or, when adding a single tag, names may be a
344 string.
344 string.
345
345
346 if local is True, the tags are stored in a per-repository file.
346 if local is True, the tags are stored in a per-repository file.
347 otherwise, they are stored in the .hgtags file, and a new
347 otherwise, they are stored in the .hgtags file, and a new
348 changeset is committed with the change.
348 changeset is committed with the change.
349
349
350 keyword arguments:
350 keyword arguments:
351
351
352 local: whether to store tags in non-version-controlled file
352 local: whether to store tags in non-version-controlled file
353 (default False)
353 (default False)
354
354
355 message: commit message to use if committing
355 message: commit message to use if committing
356
356
357 user: name of user to use if committing
357 user: name of user to use if committing
358
358
359 date: date tuple to use if committing'''
359 date: date tuple to use if committing'''
360
360
361 if not local:
361 if not local:
362 for x in self.status()[:5]:
362 for x in self.status()[:5]:
363 if '.hgtags' in x:
363 if '.hgtags' in x:
364 raise util.Abort(_('working copy of .hgtags is changed '
364 raise util.Abort(_('working copy of .hgtags is changed '
365 '(please commit .hgtags manually)'))
365 '(please commit .hgtags manually)'))
366
366
367 self.tags() # instantiate the cache
367 self.tags() # instantiate the cache
368 self._tag(names, node, message, local, user, date)
368 self._tag(names, node, message, local, user, date)
369
369
370 @propertycache
370 @propertycache
371 def _tagscache(self):
371 def _tagscache(self):
372 '''Returns a tagscache object that contains various tags-related
372 '''Returns a tagscache object that contains various tags-related
373 caches.'''
373 caches.'''
374
374
375 # This simplifies its cache management by having one decorated
375 # This simplifies its cache management by having one decorated
376 # function (this one) and the rest simply fetch things from it.
376 # function (this one) and the rest simply fetch things from it.
377 class tagscache(object):
377 class tagscache(object):
378 def __init__(self):
378 def __init__(self):
379 # These two define the set of tags for this repository. tags
379 # These two define the set of tags for this repository. tags
380 # maps tag name to node; tagtypes maps tag name to 'global' or
380 # maps tag name to node; tagtypes maps tag name to 'global' or
381 # 'local'. (Global tags are defined by .hgtags across all
381 # 'local'. (Global tags are defined by .hgtags across all
382 # heads, and local tags are defined in .hg/localtags.)
382 # heads, and local tags are defined in .hg/localtags.)
383 # They constitute the in-memory cache of tags.
383 # They constitute the in-memory cache of tags.
384 self.tags = self.tagtypes = None
384 self.tags = self.tagtypes = None
385
385
386 self.nodetagscache = self.tagslist = None
386 self.nodetagscache = self.tagslist = None
387
387
388 cache = tagscache()
388 cache = tagscache()
389 cache.tags, cache.tagtypes = self._findtags()
389 cache.tags, cache.tagtypes = self._findtags()
390
390
391 return cache
391 return cache
392
392
393 def tags(self):
393 def tags(self):
394 '''return a mapping of tag to node'''
394 '''return a mapping of tag to node'''
395 t = {}
395 t = {}
396 for k, v in self._tagscache.tags.iteritems():
396 for k, v in self._tagscache.tags.iteritems():
397 try:
397 try:
398 # ignore tags to unknown nodes
398 # ignore tags to unknown nodes
399 self.changelog.rev(v)
399 self.changelog.rev(v)
400 t[k] = v
400 t[k] = v
401 except (error.LookupError, ValueError):
401 except (error.LookupError, ValueError):
402 pass
402 pass
403 return t
403 return t
404
404
405 def _findtags(self):
405 def _findtags(self):
406 '''Do the hard work of finding tags. Return a pair of dicts
406 '''Do the hard work of finding tags. Return a pair of dicts
407 (tags, tagtypes) where tags maps tag name to node, and tagtypes
407 (tags, tagtypes) where tags maps tag name to node, and tagtypes
408 maps tag name to a string like \'global\' or \'local\'.
408 maps tag name to a string like \'global\' or \'local\'.
409 Subclasses or extensions are free to add their own tags, but
409 Subclasses or extensions are free to add their own tags, but
410 should be aware that the returned dicts will be retained for the
410 should be aware that the returned dicts will be retained for the
411 duration of the localrepo object.'''
411 duration of the localrepo object.'''
412
412
413 # XXX what tagtype should subclasses/extensions use? Currently
413 # XXX what tagtype should subclasses/extensions use? Currently
414 # mq and bookmarks add tags, but do not set the tagtype at all.
414 # mq and bookmarks add tags, but do not set the tagtype at all.
415 # Should each extension invent its own tag type? Should there
415 # Should each extension invent its own tag type? Should there
416 # be one tagtype for all such "virtual" tags? Or is the status
416 # be one tagtype for all such "virtual" tags? Or is the status
417 # quo fine?
417 # quo fine?
418
418
419 alltags = {} # map tag name to (node, hist)
419 alltags = {} # map tag name to (node, hist)
420 tagtypes = {}
420 tagtypes = {}
421
421
422 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
422 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
423 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
423 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
424
424
425 # Build the return dicts. Have to re-encode tag names because
425 # Build the return dicts. Have to re-encode tag names because
426 # the tags module always uses UTF-8 (in order not to lose info
426 # the tags module always uses UTF-8 (in order not to lose info
427 # writing to the cache), but the rest of Mercurial wants them in
427 # writing to the cache), but the rest of Mercurial wants them in
428 # local encoding.
428 # local encoding.
429 tags = {}
429 tags = {}
430 for (name, (node, hist)) in alltags.iteritems():
430 for (name, (node, hist)) in alltags.iteritems():
431 if node != nullid:
431 if node != nullid:
432 tags[encoding.tolocal(name)] = node
432 tags[encoding.tolocal(name)] = node
433 tags['tip'] = self.changelog.tip()
433 tags['tip'] = self.changelog.tip()
434 tagtypes = dict([(encoding.tolocal(name), value)
434 tagtypes = dict([(encoding.tolocal(name), value)
435 for (name, value) in tagtypes.iteritems()])
435 for (name, value) in tagtypes.iteritems()])
436 return (tags, tagtypes)
436 return (tags, tagtypes)
437
437
438 def tagtype(self, tagname):
438 def tagtype(self, tagname):
439 '''
439 '''
440 return the type of the given tag. result can be:
440 return the type of the given tag. result can be:
441
441
442 'local' : a local tag
442 'local' : a local tag
443 'global' : a global tag
443 'global' : a global tag
444 None : tag does not exist
444 None : tag does not exist
445 '''
445 '''
446
446
447 return self._tagscache.tagtypes.get(tagname)
447 return self._tagscache.tagtypes.get(tagname)
448
448
449 def tagslist(self):
449 def tagslist(self):
450 '''return a list of tags ordered by revision'''
450 '''return a list of tags ordered by revision'''
451 if not self._tagscache.tagslist:
451 if not self._tagscache.tagslist:
452 l = []
452 l = []
453 for t, n in self.tags().iteritems():
453 for t, n in self.tags().iteritems():
454 r = self.changelog.rev(n)
454 r = self.changelog.rev(n)
455 l.append((r, t, n))
455 l.append((r, t, n))
456 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
456 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
457
457
458 return self._tagscache.tagslist
458 return self._tagscache.tagslist
459
459
460 def nodetags(self, node):
460 def nodetags(self, node):
461 '''return the tags associated with a node'''
461 '''return the tags associated with a node'''
462 if not self._tagscache.nodetagscache:
462 if not self._tagscache.nodetagscache:
463 nodetagscache = {}
463 nodetagscache = {}
464 for t, n in self._tagscache.tags.iteritems():
464 for t, n in self._tagscache.tags.iteritems():
465 nodetagscache.setdefault(n, []).append(t)
465 nodetagscache.setdefault(n, []).append(t)
466 for tags in nodetagscache.itervalues():
466 for tags in nodetagscache.itervalues():
467 tags.sort()
467 tags.sort()
468 self._tagscache.nodetagscache = nodetagscache
468 self._tagscache.nodetagscache = nodetagscache
469 return self._tagscache.nodetagscache.get(node, [])
469 return self._tagscache.nodetagscache.get(node, [])
470
470
471 def nodebookmarks(self, node):
471 def nodebookmarks(self, node):
472 marks = []
472 marks = []
473 for bookmark, n in self._bookmarks.iteritems():
473 for bookmark, n in self._bookmarks.iteritems():
474 if n == node:
474 if n == node:
475 marks.append(bookmark)
475 marks.append(bookmark)
476 return sorted(marks)
476 return sorted(marks)
477
477
478 def _branchtags(self, partial, lrev):
478 def _branchtags(self, partial, lrev):
479 # TODO: rename this function?
479 # TODO: rename this function?
480 tiprev = len(self) - 1
480 tiprev = len(self) - 1
481 if lrev != tiprev:
481 if lrev != tiprev:
482 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
482 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
483 self._updatebranchcache(partial, ctxgen)
483 self._updatebranchcache(partial, ctxgen)
484 self._writebranchcache(partial, self.changelog.tip(), tiprev)
484 self._writebranchcache(partial, self.changelog.tip(), tiprev)
485
485
486 return partial
486 return partial
487
487
488 def updatebranchcache(self):
488 def updatebranchcache(self):
489 tip = self.changelog.tip()
489 tip = self.changelog.tip()
490 if self._branchcache is not None and self._branchcachetip == tip:
490 if self._branchcache is not None and self._branchcachetip == tip:
491 return
491 return
492
492
493 oldtip = self._branchcachetip
493 oldtip = self._branchcachetip
494 self._branchcachetip = tip
494 self._branchcachetip = tip
495 if oldtip is None or oldtip not in self.changelog.nodemap:
495 if oldtip is None or oldtip not in self.changelog.nodemap:
496 partial, last, lrev = self._readbranchcache()
496 partial, last, lrev = self._readbranchcache()
497 else:
497 else:
498 lrev = self.changelog.rev(oldtip)
498 lrev = self.changelog.rev(oldtip)
499 partial = self._branchcache
499 partial = self._branchcache
500
500
501 self._branchtags(partial, lrev)
501 self._branchtags(partial, lrev)
502 # this private cache holds all heads (not just the branch tips)
502 # this private cache holds all heads (not just the branch tips)
503 self._branchcache = partial
503 self._branchcache = partial
504
504
505 def branchmap(self):
505 def branchmap(self):
506 '''returns a dictionary {branch: [branchheads]}'''
506 '''returns a dictionary {branch: [branchheads]}'''
507 self.updatebranchcache()
507 self.updatebranchcache()
508 return self._branchcache
508 return self._branchcache
509
509
510 def _branchtip(self, heads):
510 def _branchtip(self, heads):
511 '''return the tipmost branch head in heads'''
511 '''return the tipmost branch head in heads'''
512 tip = heads[-1]
512 tip = heads[-1]
513 for h in reversed(heads):
513 for h in reversed(heads):
514 if not self[h].closesbranch():
514 if not self[h].closesbranch():
515 tip = h
515 tip = h
516 break
516 break
517 return tip
517 return tip
518
518
519 def branchtip(self, branch):
519 def branchtip(self, branch):
520 '''return the tip node for a given branch'''
520 '''return the tip node for a given branch'''
521 if branch not in self.branchmap():
521 if branch not in self.branchmap():
522 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
522 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
523 return self._branchtip(self.branchmap()[branch])
523 return self._branchtip(self.branchmap()[branch])
524
524
525 def branchtags(self):
525 def branchtags(self):
526 '''return a dict where branch names map to the tipmost head of
526 '''return a dict where branch names map to the tipmost head of
527 the branch, open heads come before closed'''
527 the branch, open heads come before closed'''
528 bt = {}
528 bt = {}
529 for bn, heads in self.branchmap().iteritems():
529 for bn, heads in self.branchmap().iteritems():
530 bt[bn] = self._branchtip(heads)
530 bt[bn] = self._branchtip(heads)
531 return bt
531 return bt
532
532
533 def _readbranchcache(self):
533 def _readbranchcache(self):
534 partial = {}
534 partial = {}
535 try:
535 try:
536 f = self.opener("cache/branchheads")
536 f = self.opener("cache/branchheads")
537 lines = f.read().split('\n')
537 lines = f.read().split('\n')
538 f.close()
538 f.close()
539 except (IOError, OSError):
539 except (IOError, OSError):
540 return {}, nullid, nullrev
540 return {}, nullid, nullrev
541
541
542 try:
542 try:
543 last, lrev = lines.pop(0).split(" ", 1)
543 last, lrev = lines.pop(0).split(" ", 1)
544 last, lrev = bin(last), int(lrev)
544 last, lrev = bin(last), int(lrev)
545 if lrev >= len(self) or self[lrev].node() != last:
545 if lrev >= len(self) or self[lrev].node() != last:
546 # invalidate the cache
546 # invalidate the cache
547 raise ValueError('invalidating branch cache (tip differs)')
547 raise ValueError('invalidating branch cache (tip differs)')
548 for l in lines:
548 for l in lines:
549 if not l:
549 if not l:
550 continue
550 continue
551 node, label = l.split(" ", 1)
551 node, label = l.split(" ", 1)
552 label = encoding.tolocal(label.strip())
552 label = encoding.tolocal(label.strip())
553 if not node in self:
554 raise ValueError('invalidating branch cache because node '+
555 '%s does not exist' % node)
556 partial.setdefault(label, []).append(bin(node))
553 partial.setdefault(label, []).append(bin(node))
557 except KeyboardInterrupt:
554 except KeyboardInterrupt:
558 raise
555 raise
559 except Exception, inst:
556 except Exception, inst:
560 if self.ui.debugflag:
557 if self.ui.debugflag:
561 self.ui.warn(str(inst), '\n')
558 self.ui.warn(str(inst), '\n')
562 partial, last, lrev = {}, nullid, nullrev
559 partial, last, lrev = {}, nullid, nullrev
563 return partial, last, lrev
560 return partial, last, lrev
564
561
565 def _writebranchcache(self, branches, tip, tiprev):
562 def _writebranchcache(self, branches, tip, tiprev):
566 try:
563 try:
567 f = self.opener("cache/branchheads", "w", atomictemp=True)
564 f = self.opener("cache/branchheads", "w", atomictemp=True)
568 f.write("%s %s\n" % (hex(tip), tiprev))
565 f.write("%s %s\n" % (hex(tip), tiprev))
569 for label, nodes in branches.iteritems():
566 for label, nodes in branches.iteritems():
570 for node in nodes:
567 for node in nodes:
571 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
568 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
572 f.close()
569 f.close()
573 except (IOError, OSError):
570 except (IOError, OSError):
574 pass
571 pass
575
572
576 def _updatebranchcache(self, partial, ctxgen):
573 def _updatebranchcache(self, partial, ctxgen):
577 """Given a branchhead cache, partial, that may have extra nodes or be
578 missing heads, and a generator of nodes that are at least a superset of
579 heads missing, this function updates partial to be correct.
580 """
581 # collect new branch entries
574 # collect new branch entries
582 newbranches = {}
575 newbranches = {}
583 for c in ctxgen:
576 for c in ctxgen:
584 newbranches.setdefault(c.branch(), []).append(c.node())
577 newbranches.setdefault(c.branch(), []).append(c.node())
585 # if older branchheads are reachable from new ones, they aren't
578 # if older branchheads are reachable from new ones, they aren't
586 # really branchheads. Note checking parents is insufficient:
579 # really branchheads. Note checking parents is insufficient:
587 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
580 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
588 for branch, newnodes in newbranches.iteritems():
581 for branch, newnodes in newbranches.iteritems():
589 bheads = partial.setdefault(branch, [])
582 bheads = partial.setdefault(branch, [])
590 bheads.extend(newnodes)
583 bheads.extend(newnodes)
591 # Remove duplicates - nodes that are in newnodes and are already in
584 if len(bheads) <= 1:
592 # bheads. This can happen if you strip a node and its parent was
585 continue
593 # already a head (because they're on different branches).
586 bheads = sorted(bheads, key=lambda x: self[x].rev())
594 bheads = set(bheads)
587 # starting from tip means fewer passes over reachable
595
588 while newnodes:
596 # Remove candidate heads that no longer are in the repo (e.g., as
589 latest = newnodes.pop()
597 # the result of a strip that just happened).
590 if latest not in bheads:
598 # avoid using 'bhead in self' here because that dives down into
591 continue
599 # branchcache code somewhat recursively.
592 minbhnode = self[bheads[0]].node()
600 bheads = [bhead for bhead in bheads \
593 reachable = self.changelog.reachable(latest, minbhnode)
601 if self.changelog.hasnode(bhead)]
594 reachable.remove(latest)
602 if len(bheads) > 1:
595 if reachable:
603 bheads = sorted(bheads, key=lambda x: self[x].rev())
596 bheads = [b for b in bheads if b not in reachable]
604 # starting from tip means fewer passes over reachable
605 while newnodes:
606 latest = newnodes.pop()
607 if latest not in bheads:
608 continue
609 minbhnode = self[bheads[0]].node()
610 reachable = self.changelog.reachable(latest, minbhnode)
611 reachable.remove(latest)
612 if reachable:
613 bheads = [b for b in bheads if b not in reachable]
614 partial[branch] = bheads
597 partial[branch] = bheads
615
598
616 # There may be branches that cease to exist when the last commit in the
617 # branch was stripped. This code filters them out. Note that the
618 # branch that ceased to exist may not be in newbranches because
619 # newbranches is the set of candidate heads, which when you strip the
620 # last commit in a branch will be the parent branch.
621 for branch in partial.keys():
622 nodes = [head for head in partial[branch] \
623 if self.changelog.hasnode(head)]
624 if len(nodes) < 1:
625 del partial[branch]
626
627 def lookup(self, key):
599 def lookup(self, key):
628 return self[key].node()
600 return self[key].node()
629
601
630 def lookupbranch(self, key, remote=None):
602 def lookupbranch(self, key, remote=None):
631 repo = remote or self
603 repo = remote or self
632 if key in repo.branchmap():
604 if key in repo.branchmap():
633 return key
605 return key
634
606
635 repo = (remote and remote.local()) and remote or self
607 repo = (remote and remote.local()) and remote or self
636 return repo[key].branch()
608 return repo[key].branch()
637
609
638 def known(self, nodes):
610 def known(self, nodes):
639 nm = self.changelog.nodemap
611 nm = self.changelog.nodemap
640 pc = self._phasecache
612 pc = self._phasecache
641 result = []
613 result = []
642 for n in nodes:
614 for n in nodes:
643 r = nm.get(n)
615 r = nm.get(n)
644 resp = not (r is None or pc.phase(self, r) >= phases.secret)
616 resp = not (r is None or pc.phase(self, r) >= phases.secret)
645 result.append(resp)
617 result.append(resp)
646 return result
618 return result
647
619
648 def local(self):
620 def local(self):
649 return self
621 return self
650
622
651 def join(self, f):
623 def join(self, f):
652 return os.path.join(self.path, f)
624 return os.path.join(self.path, f)
653
625
654 def wjoin(self, f):
626 def wjoin(self, f):
655 return os.path.join(self.root, f)
627 return os.path.join(self.root, f)
656
628
657 def file(self, f):
629 def file(self, f):
658 if f[0] == '/':
630 if f[0] == '/':
659 f = f[1:]
631 f = f[1:]
660 return filelog.filelog(self.sopener, f)
632 return filelog.filelog(self.sopener, f)
661
633
662 def changectx(self, changeid):
634 def changectx(self, changeid):
663 return self[changeid]
635 return self[changeid]
664
636
665 def parents(self, changeid=None):
637 def parents(self, changeid=None):
666 '''get list of changectxs for parents of changeid'''
638 '''get list of changectxs for parents of changeid'''
667 return self[changeid].parents()
639 return self[changeid].parents()
668
640
669 def setparents(self, p1, p2=nullid):
641 def setparents(self, p1, p2=nullid):
670 copies = self.dirstate.setparents(p1, p2)
642 copies = self.dirstate.setparents(p1, p2)
671 if copies:
643 if copies:
672 # Adjust copy records; the dirstate cannot do it, as it
644 # Adjust copy records; the dirstate cannot do it, as it
673 # requires access to the parents' manifests. Preserve them
645 # requires access to the parents' manifests. Preserve them
674 # only for entries added to the first parent.
646 # only for entries added to the first parent.
675 pctx = self[p1]
647 pctx = self[p1]
676 for f in copies:
648 for f in copies:
677 if f not in pctx and copies[f] in pctx:
649 if f not in pctx and copies[f] in pctx:
678 self.dirstate.copy(copies[f], f)
650 self.dirstate.copy(copies[f], f)
679
651
680 def filectx(self, path, changeid=None, fileid=None):
652 def filectx(self, path, changeid=None, fileid=None):
681 """changeid can be a changeset revision, node, or tag.
653 """changeid can be a changeset revision, node, or tag.
682 fileid can be a file revision or node."""
654 fileid can be a file revision or node."""
683 return context.filectx(self, path, changeid, fileid)
655 return context.filectx(self, path, changeid, fileid)
684
656
685 def getcwd(self):
657 def getcwd(self):
686 return self.dirstate.getcwd()
658 return self.dirstate.getcwd()
687
659
688 def pathto(self, f, cwd=None):
660 def pathto(self, f, cwd=None):
689 return self.dirstate.pathto(f, cwd)
661 return self.dirstate.pathto(f, cwd)
690
662
691 def wfile(self, f, mode='r'):
663 def wfile(self, f, mode='r'):
692 return self.wopener(f, mode)
664 return self.wopener(f, mode)
693
665
694 def _link(self, f):
666 def _link(self, f):
695 return os.path.islink(self.wjoin(f))
667 return os.path.islink(self.wjoin(f))
696
668
697 def _loadfilter(self, filter):
669 def _loadfilter(self, filter):
698 if filter not in self.filterpats:
670 if filter not in self.filterpats:
699 l = []
671 l = []
700 for pat, cmd in self.ui.configitems(filter):
672 for pat, cmd in self.ui.configitems(filter):
701 if cmd == '!':
673 if cmd == '!':
702 continue
674 continue
703 mf = matchmod.match(self.root, '', [pat])
675 mf = matchmod.match(self.root, '', [pat])
704 fn = None
676 fn = None
705 params = cmd
677 params = cmd
706 for name, filterfn in self._datafilters.iteritems():
678 for name, filterfn in self._datafilters.iteritems():
707 if cmd.startswith(name):
679 if cmd.startswith(name):
708 fn = filterfn
680 fn = filterfn
709 params = cmd[len(name):].lstrip()
681 params = cmd[len(name):].lstrip()
710 break
682 break
711 if not fn:
683 if not fn:
712 fn = lambda s, c, **kwargs: util.filter(s, c)
684 fn = lambda s, c, **kwargs: util.filter(s, c)
713 # Wrap old filters not supporting keyword arguments
685 # Wrap old filters not supporting keyword arguments
714 if not inspect.getargspec(fn)[2]:
686 if not inspect.getargspec(fn)[2]:
715 oldfn = fn
687 oldfn = fn
716 fn = lambda s, c, **kwargs: oldfn(s, c)
688 fn = lambda s, c, **kwargs: oldfn(s, c)
717 l.append((mf, fn, params))
689 l.append((mf, fn, params))
718 self.filterpats[filter] = l
690 self.filterpats[filter] = l
719 return self.filterpats[filter]
691 return self.filterpats[filter]
720
692
721 def _filter(self, filterpats, filename, data):
693 def _filter(self, filterpats, filename, data):
722 for mf, fn, cmd in filterpats:
694 for mf, fn, cmd in filterpats:
723 if mf(filename):
695 if mf(filename):
724 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
696 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
725 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
697 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
726 break
698 break
727
699
728 return data
700 return data
729
701
730 @propertycache
702 @propertycache
731 def _encodefilterpats(self):
703 def _encodefilterpats(self):
732 return self._loadfilter('encode')
704 return self._loadfilter('encode')
733
705
734 @propertycache
706 @propertycache
735 def _decodefilterpats(self):
707 def _decodefilterpats(self):
736 return self._loadfilter('decode')
708 return self._loadfilter('decode')
737
709
738 def adddatafilter(self, name, filter):
710 def adddatafilter(self, name, filter):
739 self._datafilters[name] = filter
711 self._datafilters[name] = filter
740
712
741 def wread(self, filename):
713 def wread(self, filename):
742 if self._link(filename):
714 if self._link(filename):
743 data = os.readlink(self.wjoin(filename))
715 data = os.readlink(self.wjoin(filename))
744 else:
716 else:
745 data = self.wopener.read(filename)
717 data = self.wopener.read(filename)
746 return self._filter(self._encodefilterpats, filename, data)
718 return self._filter(self._encodefilterpats, filename, data)
747
719
748 def wwrite(self, filename, data, flags):
720 def wwrite(self, filename, data, flags):
749 data = self._filter(self._decodefilterpats, filename, data)
721 data = self._filter(self._decodefilterpats, filename, data)
750 if 'l' in flags:
722 if 'l' in flags:
751 self.wopener.symlink(data, filename)
723 self.wopener.symlink(data, filename)
752 else:
724 else:
753 self.wopener.write(filename, data)
725 self.wopener.write(filename, data)
754 if 'x' in flags:
726 if 'x' in flags:
755 util.setflags(self.wjoin(filename), False, True)
727 util.setflags(self.wjoin(filename), False, True)
756
728
757 def wwritedata(self, filename, data):
729 def wwritedata(self, filename, data):
758 return self._filter(self._decodefilterpats, filename, data)
730 return self._filter(self._decodefilterpats, filename, data)
759
731
760 def transaction(self, desc):
732 def transaction(self, desc):
761 tr = self._transref and self._transref() or None
733 tr = self._transref and self._transref() or None
762 if tr and tr.running():
734 if tr and tr.running():
763 return tr.nest()
735 return tr.nest()
764
736
765 # abort here if the journal already exists
737 # abort here if the journal already exists
766 if os.path.exists(self.sjoin("journal")):
738 if os.path.exists(self.sjoin("journal")):
767 raise error.RepoError(
739 raise error.RepoError(
768 _("abandoned transaction found - run hg recover"))
740 _("abandoned transaction found - run hg recover"))
769
741
770 self._writejournal(desc)
742 self._writejournal(desc)
771 renames = [(x, undoname(x)) for x in self._journalfiles()]
743 renames = [(x, undoname(x)) for x in self._journalfiles()]
772
744
773 tr = transaction.transaction(self.ui.warn, self.sopener,
745 tr = transaction.transaction(self.ui.warn, self.sopener,
774 self.sjoin("journal"),
746 self.sjoin("journal"),
775 aftertrans(renames),
747 aftertrans(renames),
776 self.store.createmode)
748 self.store.createmode)
777 self._transref = weakref.ref(tr)
749 self._transref = weakref.ref(tr)
778 return tr
750 return tr
779
751
780 def _journalfiles(self):
752 def _journalfiles(self):
781 return (self.sjoin('journal'), self.join('journal.dirstate'),
753 return (self.sjoin('journal'), self.join('journal.dirstate'),
782 self.join('journal.branch'), self.join('journal.desc'),
754 self.join('journal.branch'), self.join('journal.desc'),
783 self.join('journal.bookmarks'),
755 self.join('journal.bookmarks'),
784 self.sjoin('journal.phaseroots'))
756 self.sjoin('journal.phaseroots'))
785
757
786 def undofiles(self):
758 def undofiles(self):
787 return [undoname(x) for x in self._journalfiles()]
759 return [undoname(x) for x in self._journalfiles()]
788
760
789 def _writejournal(self, desc):
761 def _writejournal(self, desc):
790 self.opener.write("journal.dirstate",
762 self.opener.write("journal.dirstate",
791 self.opener.tryread("dirstate"))
763 self.opener.tryread("dirstate"))
792 self.opener.write("journal.branch",
764 self.opener.write("journal.branch",
793 encoding.fromlocal(self.dirstate.branch()))
765 encoding.fromlocal(self.dirstate.branch()))
794 self.opener.write("journal.desc",
766 self.opener.write("journal.desc",
795 "%d\n%s\n" % (len(self), desc))
767 "%d\n%s\n" % (len(self), desc))
796 self.opener.write("journal.bookmarks",
768 self.opener.write("journal.bookmarks",
797 self.opener.tryread("bookmarks"))
769 self.opener.tryread("bookmarks"))
798 self.sopener.write("journal.phaseroots",
770 self.sopener.write("journal.phaseroots",
799 self.sopener.tryread("phaseroots"))
771 self.sopener.tryread("phaseroots"))
800
772
801 def recover(self):
773 def recover(self):
802 lock = self.lock()
774 lock = self.lock()
803 try:
775 try:
804 if os.path.exists(self.sjoin("journal")):
776 if os.path.exists(self.sjoin("journal")):
805 self.ui.status(_("rolling back interrupted transaction\n"))
777 self.ui.status(_("rolling back interrupted transaction\n"))
806 transaction.rollback(self.sopener, self.sjoin("journal"),
778 transaction.rollback(self.sopener, self.sjoin("journal"),
807 self.ui.warn)
779 self.ui.warn)
808 self.invalidate()
780 self.invalidate()
809 return True
781 return True
810 else:
782 else:
811 self.ui.warn(_("no interrupted transaction available\n"))
783 self.ui.warn(_("no interrupted transaction available\n"))
812 return False
784 return False
813 finally:
785 finally:
814 lock.release()
786 lock.release()
815
787
816 def rollback(self, dryrun=False, force=False):
788 def rollback(self, dryrun=False, force=False):
817 wlock = lock = None
789 wlock = lock = None
818 try:
790 try:
819 wlock = self.wlock()
791 wlock = self.wlock()
820 lock = self.lock()
792 lock = self.lock()
821 if os.path.exists(self.sjoin("undo")):
793 if os.path.exists(self.sjoin("undo")):
822 return self._rollback(dryrun, force)
794 return self._rollback(dryrun, force)
823 else:
795 else:
824 self.ui.warn(_("no rollback information available\n"))
796 self.ui.warn(_("no rollback information available\n"))
825 return 1
797 return 1
826 finally:
798 finally:
827 release(lock, wlock)
799 release(lock, wlock)
828
800
829 def _rollback(self, dryrun, force):
801 def _rollback(self, dryrun, force):
830 ui = self.ui
802 ui = self.ui
831 try:
803 try:
832 args = self.opener.read('undo.desc').splitlines()
804 args = self.opener.read('undo.desc').splitlines()
833 (oldlen, desc, detail) = (int(args[0]), args[1], None)
805 (oldlen, desc, detail) = (int(args[0]), args[1], None)
834 if len(args) >= 3:
806 if len(args) >= 3:
835 detail = args[2]
807 detail = args[2]
836 oldtip = oldlen - 1
808 oldtip = oldlen - 1
837
809
838 if detail and ui.verbose:
810 if detail and ui.verbose:
839 msg = (_('repository tip rolled back to revision %s'
811 msg = (_('repository tip rolled back to revision %s'
840 ' (undo %s: %s)\n')
812 ' (undo %s: %s)\n')
841 % (oldtip, desc, detail))
813 % (oldtip, desc, detail))
842 else:
814 else:
843 msg = (_('repository tip rolled back to revision %s'
815 msg = (_('repository tip rolled back to revision %s'
844 ' (undo %s)\n')
816 ' (undo %s)\n')
845 % (oldtip, desc))
817 % (oldtip, desc))
846 except IOError:
818 except IOError:
847 msg = _('rolling back unknown transaction\n')
819 msg = _('rolling back unknown transaction\n')
848 desc = None
820 desc = None
849
821
850 if not force and self['.'] != self['tip'] and desc == 'commit':
822 if not force and self['.'] != self['tip'] and desc == 'commit':
851 raise util.Abort(
823 raise util.Abort(
852 _('rollback of last commit while not checked out '
824 _('rollback of last commit while not checked out '
853 'may lose data'), hint=_('use -f to force'))
825 'may lose data'), hint=_('use -f to force'))
854
826
855 ui.status(msg)
827 ui.status(msg)
856 if dryrun:
828 if dryrun:
857 return 0
829 return 0
858
830
859 parents = self.dirstate.parents()
831 parents = self.dirstate.parents()
860 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
832 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
861 if os.path.exists(self.join('undo.bookmarks')):
833 if os.path.exists(self.join('undo.bookmarks')):
862 util.rename(self.join('undo.bookmarks'),
834 util.rename(self.join('undo.bookmarks'),
863 self.join('bookmarks'))
835 self.join('bookmarks'))
864 if os.path.exists(self.sjoin('undo.phaseroots')):
836 if os.path.exists(self.sjoin('undo.phaseroots')):
865 util.rename(self.sjoin('undo.phaseroots'),
837 util.rename(self.sjoin('undo.phaseroots'),
866 self.sjoin('phaseroots'))
838 self.sjoin('phaseroots'))
867 self.invalidate()
839 self.invalidate()
868
840
869 parentgone = (parents[0] not in self.changelog.nodemap or
841 parentgone = (parents[0] not in self.changelog.nodemap or
870 parents[1] not in self.changelog.nodemap)
842 parents[1] not in self.changelog.nodemap)
871 if parentgone:
843 if parentgone:
872 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
844 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
873 try:
845 try:
874 branch = self.opener.read('undo.branch')
846 branch = self.opener.read('undo.branch')
875 self.dirstate.setbranch(branch)
847 self.dirstate.setbranch(branch)
876 except IOError:
848 except IOError:
877 ui.warn(_('named branch could not be reset: '
849 ui.warn(_('named branch could not be reset: '
878 'current branch is still \'%s\'\n')
850 'current branch is still \'%s\'\n')
879 % self.dirstate.branch())
851 % self.dirstate.branch())
880
852
881 self.dirstate.invalidate()
853 self.dirstate.invalidate()
882 parents = tuple([p.rev() for p in self.parents()])
854 parents = tuple([p.rev() for p in self.parents()])
883 if len(parents) > 1:
855 if len(parents) > 1:
884 ui.status(_('working directory now based on '
856 ui.status(_('working directory now based on '
885 'revisions %d and %d\n') % parents)
857 'revisions %d and %d\n') % parents)
886 else:
858 else:
887 ui.status(_('working directory now based on '
859 ui.status(_('working directory now based on '
888 'revision %d\n') % parents)
860 'revision %d\n') % parents)
889 # TODO: if we know which new heads may result from this rollback, pass
890 # them to destroy(), which will prevent the branchhead cache from being
891 # invalidated.
892 self.destroyed()
861 self.destroyed()
893 return 0
862 return 0
894
863
895 def invalidatecaches(self):
864 def invalidatecaches(self):
896 def delcache(name):
865 def delcache(name):
897 try:
866 try:
898 delattr(self, name)
867 delattr(self, name)
899 except AttributeError:
868 except AttributeError:
900 pass
869 pass
901
870
902 delcache('_tagscache')
871 delcache('_tagscache')
903
872
904 self._branchcache = None # in UTF-8
873 self._branchcache = None # in UTF-8
905 self._branchcachetip = None
874 self._branchcachetip = None
906
875
907 def invalidatedirstate(self):
876 def invalidatedirstate(self):
908 '''Invalidates the dirstate, causing the next call to dirstate
877 '''Invalidates the dirstate, causing the next call to dirstate
909 to check if it was modified since the last time it was read,
878 to check if it was modified since the last time it was read,
910 rereading it if it has.
879 rereading it if it has.
911
880
912 This is different from dirstate.invalidate() in that it doesn't always
881 This is different from dirstate.invalidate() in that it doesn't always
913 reread the dirstate. Use dirstate.invalidate() if you want to
882 reread the dirstate. Use dirstate.invalidate() if you want to
914 explicitly read the dirstate again (i.e. restoring it to a previous
883 explicitly read the dirstate again (i.e. restoring it to a previous
915 known good state).'''
884 known good state).'''
916 if 'dirstate' in self.__dict__:
885 if 'dirstate' in self.__dict__:
917 for k in self.dirstate._filecache:
886 for k in self.dirstate._filecache:
918 try:
887 try:
919 delattr(self.dirstate, k)
888 delattr(self.dirstate, k)
920 except AttributeError:
889 except AttributeError:
921 pass
890 pass
922 delattr(self, 'dirstate')
891 delattr(self, 'dirstate')
923
892
924 def invalidate(self):
893 def invalidate(self):
925 for k in self._filecache:
894 for k in self._filecache:
926 # dirstate is invalidated separately in invalidatedirstate()
895 # dirstate is invalidated separately in invalidatedirstate()
927 if k == 'dirstate':
896 if k == 'dirstate':
928 continue
897 continue
929
898
930 try:
899 try:
931 delattr(self, k)
900 delattr(self, k)
932 except AttributeError:
901 except AttributeError:
933 pass
902 pass
934 self.invalidatecaches()
903 self.invalidatecaches()
935
904
936 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
905 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
937 try:
906 try:
938 l = lock.lock(lockname, 0, releasefn, desc=desc)
907 l = lock.lock(lockname, 0, releasefn, desc=desc)
939 except error.LockHeld, inst:
908 except error.LockHeld, inst:
940 if not wait:
909 if not wait:
941 raise
910 raise
942 self.ui.warn(_("waiting for lock on %s held by %r\n") %
911 self.ui.warn(_("waiting for lock on %s held by %r\n") %
943 (desc, inst.locker))
912 (desc, inst.locker))
944 # default to 600 seconds timeout
913 # default to 600 seconds timeout
945 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
914 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
946 releasefn, desc=desc)
915 releasefn, desc=desc)
947 if acquirefn:
916 if acquirefn:
948 acquirefn()
917 acquirefn()
949 return l
918 return l
950
919
951 def _afterlock(self, callback):
920 def _afterlock(self, callback):
952 """add a callback to the current repository lock.
921 """add a callback to the current repository lock.
953
922
954 The callback will be executed on lock release."""
923 The callback will be executed on lock release."""
955 l = self._lockref and self._lockref()
924 l = self._lockref and self._lockref()
956 if l:
925 if l:
957 l.postrelease.append(callback)
926 l.postrelease.append(callback)
958 else:
927 else:
959 callback()
928 callback()
960
929
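For illustration, a callback registered through _afterlock() fires when the outermost store lock is released, or immediately when no lock is held; a minimal sketch with a hypothetical repo object:

    def notify():
        repo.ui.status("store lock released\n")
    repo._afterlock(notify)     # queued on the lock, or run right away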
961 def lock(self, wait=True):
930 def lock(self, wait=True):
962 '''Lock the repository store (.hg/store) and return a weak reference
931 '''Lock the repository store (.hg/store) and return a weak reference
963 to the lock. Use this before modifying the store (e.g. committing or
932 to the lock. Use this before modifying the store (e.g. committing or
964 stripping). If you are opening a transaction, get a lock as well.)'''
933 stripping). If you are opening a transaction, get a lock as well.)'''
965 l = self._lockref and self._lockref()
934 l = self._lockref and self._lockref()
966 if l is not None and l.held:
935 if l is not None and l.held:
967 l.lock()
936 l.lock()
968 return l
937 return l
969
938
970 def unlock():
939 def unlock():
971 self.store.write()
940 self.store.write()
972 if '_phasecache' in vars(self):
941 if '_phasecache' in vars(self):
973 self._phasecache.write()
942 self._phasecache.write()
974 for k, ce in self._filecache.items():
943 for k, ce in self._filecache.items():
975 if k == 'dirstate':
944 if k == 'dirstate':
976 continue
945 continue
977 ce.refresh()
946 ce.refresh()
978
947
979 l = self._lock(self.sjoin("lock"), wait, unlock,
948 l = self._lock(self.sjoin("lock"), wait, unlock,
980 self.invalidate, _('repository %s') % self.origroot)
949 self.invalidate, _('repository %s') % self.origroot)
981 self._lockref = weakref.ref(l)
950 self._lockref = weakref.ref(l)
982 return l
951 return l
983
952
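A typical caller pairs lock() with a try/finally, much as commitctx() below does; sketch only, with repo standing in for a localrepository:

    lock = repo.lock()
    try:
        # modify the store, e.g. open a transaction and add revisions
        pass
    finally:
        lock.release()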
984 def wlock(self, wait=True):
953 def wlock(self, wait=True):
985 '''Lock the non-store parts of the repository (everything under
954 '''Lock the non-store parts of the repository (everything under
986 .hg except .hg/store) and return a weak reference to the lock.
955 .hg except .hg/store) and return a weak reference to the lock.
987 Use this before modifying files in .hg.'''
956 Use this before modifying files in .hg.'''
988 l = self._wlockref and self._wlockref()
957 l = self._wlockref and self._wlockref()
989 if l is not None and l.held:
958 if l is not None and l.held:
990 l.lock()
959 l.lock()
991 return l
960 return l
992
961
993 def unlock():
962 def unlock():
994 self.dirstate.write()
963 self.dirstate.write()
995 ce = self._filecache.get('dirstate')
964 ce = self._filecache.get('dirstate')
996 if ce:
965 if ce:
997 ce.refresh()
966 ce.refresh()
998
967
999 l = self._lock(self.join("wlock"), wait, unlock,
968 l = self._lock(self.join("wlock"), wait, unlock,
1000 self.invalidatedirstate, _('working directory of %s') %
969 self.invalidatedirstate, _('working directory of %s') %
1001 self.origroot)
970 self.origroot)
1002 self._wlockref = weakref.ref(l)
971 self._wlockref = weakref.ref(l)
1003 return l
972 return l
1004
973
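Correspondingly, code touching the dirstate or other non-store files under .hg takes the working-copy lock; a sketch of the same pattern used later in status():

    wlock = repo.wlock()
    try:
        repo.dirstate.normal('path/to/clean/file')
    finally:
        wlock.release()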
1005 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
974 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1006 """
975 """
1007 commit an individual file as part of a larger transaction
976 commit an individual file as part of a larger transaction
1008 """
977 """
1009
978
1010 fname = fctx.path()
979 fname = fctx.path()
1011 text = fctx.data()
980 text = fctx.data()
1012 flog = self.file(fname)
981 flog = self.file(fname)
1013 fparent1 = manifest1.get(fname, nullid)
982 fparent1 = manifest1.get(fname, nullid)
1014 fparent2 = fparent2o = manifest2.get(fname, nullid)
983 fparent2 = fparent2o = manifest2.get(fname, nullid)
1015
984
1016 meta = {}
985 meta = {}
1017 copy = fctx.renamed()
986 copy = fctx.renamed()
1018 if copy and copy[0] != fname:
987 if copy and copy[0] != fname:
1019 # Mark the new revision of this file as a copy of another
988 # Mark the new revision of this file as a copy of another
1020 # file. This copy data will effectively act as a parent
989 # file. This copy data will effectively act as a parent
1021 # of this new revision. If this is a merge, the first
990 # of this new revision. If this is a merge, the first
1022 # parent will be the nullid (meaning "look up the copy data")
991 # parent will be the nullid (meaning "look up the copy data")
1023 # and the second one will be the other parent. For example:
992 # and the second one will be the other parent. For example:
1024 #
993 #
1025 # 0 --- 1 --- 3 rev1 changes file foo
994 # 0 --- 1 --- 3 rev1 changes file foo
1026 # \ / rev2 renames foo to bar and changes it
995 # \ / rev2 renames foo to bar and changes it
1027 # \- 2 -/ rev3 should have bar with all changes and
996 # \- 2 -/ rev3 should have bar with all changes and
1028 # should record that bar descends from
997 # should record that bar descends from
1029 # bar in rev2 and foo in rev1
998 # bar in rev2 and foo in rev1
1030 #
999 #
1031 # this allows this merge to succeed:
1000 # this allows this merge to succeed:
1032 #
1001 #
1033 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1002 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1034 # \ / merging rev3 and rev4 should use bar@rev2
1003 # \ / merging rev3 and rev4 should use bar@rev2
1035 # \- 2 --- 4 as the merge base
1004 # \- 2 --- 4 as the merge base
1036 #
1005 #
1037
1006
1038 cfname = copy[0]
1007 cfname = copy[0]
1039 crev = manifest1.get(cfname)
1008 crev = manifest1.get(cfname)
1040 newfparent = fparent2
1009 newfparent = fparent2
1041
1010
1042 if manifest2: # branch merge
1011 if manifest2: # branch merge
1043 if fparent2 == nullid or crev is None: # copied on remote side
1012 if fparent2 == nullid or crev is None: # copied on remote side
1044 if cfname in manifest2:
1013 if cfname in manifest2:
1045 crev = manifest2[cfname]
1014 crev = manifest2[cfname]
1046 newfparent = fparent1
1015 newfparent = fparent1
1047
1016
1048 # find source in nearest ancestor if we've lost track
1017 # find source in nearest ancestor if we've lost track
1049 if not crev:
1018 if not crev:
1050 self.ui.debug(" %s: searching for copy revision for %s\n" %
1019 self.ui.debug(" %s: searching for copy revision for %s\n" %
1051 (fname, cfname))
1020 (fname, cfname))
1052 for ancestor in self[None].ancestors():
1021 for ancestor in self[None].ancestors():
1053 if cfname in ancestor:
1022 if cfname in ancestor:
1054 crev = ancestor[cfname].filenode()
1023 crev = ancestor[cfname].filenode()
1055 break
1024 break
1056
1025
1057 if crev:
1026 if crev:
1058 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1027 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1059 meta["copy"] = cfname
1028 meta["copy"] = cfname
1060 meta["copyrev"] = hex(crev)
1029 meta["copyrev"] = hex(crev)
1061 fparent1, fparent2 = nullid, newfparent
1030 fparent1, fparent2 = nullid, newfparent
1062 else:
1031 else:
1063 self.ui.warn(_("warning: can't find ancestor for '%s' "
1032 self.ui.warn(_("warning: can't find ancestor for '%s' "
1064 "copied from '%s'!\n") % (fname, cfname))
1033 "copied from '%s'!\n") % (fname, cfname))
1065
1034
1066 elif fparent2 != nullid:
1035 elif fparent2 != nullid:
1067 # is one parent an ancestor of the other?
1036 # is one parent an ancestor of the other?
1068 fparentancestor = flog.ancestor(fparent1, fparent2)
1037 fparentancestor = flog.ancestor(fparent1, fparent2)
1069 if fparentancestor == fparent1:
1038 if fparentancestor == fparent1:
1070 fparent1, fparent2 = fparent2, nullid
1039 fparent1, fparent2 = fparent2, nullid
1071 elif fparentancestor == fparent2:
1040 elif fparentancestor == fparent2:
1072 fparent2 = nullid
1041 fparent2 = nullid
1073
1042
1074 # is the file changed?
1043 # is the file changed?
1075 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1044 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1076 changelist.append(fname)
1045 changelist.append(fname)
1077 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1046 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1078
1047
1079 # are just the flags changed during merge?
1048 # are just the flags changed during merge?
1080 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1049 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1081 changelist.append(fname)
1050 changelist.append(fname)
1082
1051
1083 return fparent1
1052 return fparent1
1084
1053
1085 def commit(self, text="", user=None, date=None, match=None, force=False,
1054 def commit(self, text="", user=None, date=None, match=None, force=False,
1086 editor=False, extra={}):
1055 editor=False, extra={}):
1087 """Add a new revision to current repository.
1056 """Add a new revision to current repository.
1088
1057
1089 Revision information is gathered from the working directory,
1058 Revision information is gathered from the working directory,
1090 match can be used to filter the committed files. If editor is
1059 match can be used to filter the committed files. If editor is
1091 supplied, it is called to get a commit message.
1060 supplied, it is called to get a commit message.
1092 """
1061 """
1093
1062
1094 def fail(f, msg):
1063 def fail(f, msg):
1095 raise util.Abort('%s: %s' % (f, msg))
1064 raise util.Abort('%s: %s' % (f, msg))
1096
1065
1097 if not match:
1066 if not match:
1098 match = matchmod.always(self.root, '')
1067 match = matchmod.always(self.root, '')
1099
1068
1100 if not force:
1069 if not force:
1101 vdirs = []
1070 vdirs = []
1102 match.dir = vdirs.append
1071 match.dir = vdirs.append
1103 match.bad = fail
1072 match.bad = fail
1104
1073
1105 wlock = self.wlock()
1074 wlock = self.wlock()
1106 try:
1075 try:
1107 wctx = self[None]
1076 wctx = self[None]
1108 merge = len(wctx.parents()) > 1
1077 merge = len(wctx.parents()) > 1
1109
1078
1110 if (not force and merge and match and
1079 if (not force and merge and match and
1111 (match.files() or match.anypats())):
1080 (match.files() or match.anypats())):
1112 raise util.Abort(_('cannot partially commit a merge '
1081 raise util.Abort(_('cannot partially commit a merge '
1113 '(do not specify files or patterns)'))
1082 '(do not specify files or patterns)'))
1114
1083
1115 changes = self.status(match=match, clean=force)
1084 changes = self.status(match=match, clean=force)
1116 if force:
1085 if force:
1117 changes[0].extend(changes[6]) # mq may commit unchanged files
1086 changes[0].extend(changes[6]) # mq may commit unchanged files
1118
1087
1119 # check subrepos
1088 # check subrepos
1120 subs = []
1089 subs = []
1121 commitsubs = set()
1090 commitsubs = set()
1122 newstate = wctx.substate.copy()
1091 newstate = wctx.substate.copy()
1123 # only manage subrepos and .hgsubstate if .hgsub is present
1092 # only manage subrepos and .hgsubstate if .hgsub is present
1124 if '.hgsub' in wctx:
1093 if '.hgsub' in wctx:
1125 # we'll decide whether to track this ourselves, thanks
1094 # we'll decide whether to track this ourselves, thanks
1126 if '.hgsubstate' in changes[0]:
1095 if '.hgsubstate' in changes[0]:
1127 changes[0].remove('.hgsubstate')
1096 changes[0].remove('.hgsubstate')
1128 if '.hgsubstate' in changes[2]:
1097 if '.hgsubstate' in changes[2]:
1129 changes[2].remove('.hgsubstate')
1098 changes[2].remove('.hgsubstate')
1130
1099
1131 # compare current state to last committed state
1100 # compare current state to last committed state
1132 # build new substate based on last committed state
1101 # build new substate based on last committed state
1133 oldstate = wctx.p1().substate
1102 oldstate = wctx.p1().substate
1134 for s in sorted(newstate.keys()):
1103 for s in sorted(newstate.keys()):
1135 if not match(s):
1104 if not match(s):
1136 # ignore working copy, use old state if present
1105 # ignore working copy, use old state if present
1137 if s in oldstate:
1106 if s in oldstate:
1138 newstate[s] = oldstate[s]
1107 newstate[s] = oldstate[s]
1139 continue
1108 continue
1140 if not force:
1109 if not force:
1141 raise util.Abort(
1110 raise util.Abort(
1142 _("commit with new subrepo %s excluded") % s)
1111 _("commit with new subrepo %s excluded") % s)
1143 if wctx.sub(s).dirty(True):
1112 if wctx.sub(s).dirty(True):
1144 if not self.ui.configbool('ui', 'commitsubrepos'):
1113 if not self.ui.configbool('ui', 'commitsubrepos'):
1145 raise util.Abort(
1114 raise util.Abort(
1146 _("uncommitted changes in subrepo %s") % s,
1115 _("uncommitted changes in subrepo %s") % s,
1147 hint=_("use --subrepos for recursive commit"))
1116 hint=_("use --subrepos for recursive commit"))
1148 subs.append(s)
1117 subs.append(s)
1149 commitsubs.add(s)
1118 commitsubs.add(s)
1150 else:
1119 else:
1151 bs = wctx.sub(s).basestate()
1120 bs = wctx.sub(s).basestate()
1152 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1121 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1153 if oldstate.get(s, (None, None, None))[1] != bs:
1122 if oldstate.get(s, (None, None, None))[1] != bs:
1154 subs.append(s)
1123 subs.append(s)
1155
1124
1156 # check for removed subrepos
1125 # check for removed subrepos
1157 for p in wctx.parents():
1126 for p in wctx.parents():
1158 r = [s for s in p.substate if s not in newstate]
1127 r = [s for s in p.substate if s not in newstate]
1159 subs += [s for s in r if match(s)]
1128 subs += [s for s in r if match(s)]
1160 if subs:
1129 if subs:
1161 if (not match('.hgsub') and
1130 if (not match('.hgsub') and
1162 '.hgsub' in (wctx.modified() + wctx.added())):
1131 '.hgsub' in (wctx.modified() + wctx.added())):
1163 raise util.Abort(
1132 raise util.Abort(
1164 _("can't commit subrepos without .hgsub"))
1133 _("can't commit subrepos without .hgsub"))
1165 changes[0].insert(0, '.hgsubstate')
1134 changes[0].insert(0, '.hgsubstate')
1166
1135
1167 elif '.hgsub' in changes[2]:
1136 elif '.hgsub' in changes[2]:
1168 # clean up .hgsubstate when .hgsub is removed
1137 # clean up .hgsubstate when .hgsub is removed
1169 if ('.hgsubstate' in wctx and
1138 if ('.hgsubstate' in wctx and
1170 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1139 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1171 changes[2].insert(0, '.hgsubstate')
1140 changes[2].insert(0, '.hgsubstate')
1172
1141
1173 # make sure all explicit patterns are matched
1142 # make sure all explicit patterns are matched
1174 if not force and match.files():
1143 if not force and match.files():
1175 matched = set(changes[0] + changes[1] + changes[2])
1144 matched = set(changes[0] + changes[1] + changes[2])
1176
1145
1177 for f in match.files():
1146 for f in match.files():
1178 if f == '.' or f in matched or f in wctx.substate:
1147 if f == '.' or f in matched or f in wctx.substate:
1179 continue
1148 continue
1180 if f in changes[3]: # missing
1149 if f in changes[3]: # missing
1181 fail(f, _('file not found!'))
1150 fail(f, _('file not found!'))
1182 if f in vdirs: # visited directory
1151 if f in vdirs: # visited directory
1183 d = f + '/'
1152 d = f + '/'
1184 for mf in matched:
1153 for mf in matched:
1185 if mf.startswith(d):
1154 if mf.startswith(d):
1186 break
1155 break
1187 else:
1156 else:
1188 fail(f, _("no match under directory!"))
1157 fail(f, _("no match under directory!"))
1189 elif f not in self.dirstate:
1158 elif f not in self.dirstate:
1190 fail(f, _("file not tracked!"))
1159 fail(f, _("file not tracked!"))
1191
1160
1192 if (not force and not extra.get("close") and not merge
1161 if (not force and not extra.get("close") and not merge
1193 and not (changes[0] or changes[1] or changes[2])
1162 and not (changes[0] or changes[1] or changes[2])
1194 and wctx.branch() == wctx.p1().branch()):
1163 and wctx.branch() == wctx.p1().branch()):
1195 return None
1164 return None
1196
1165
1197 if merge and changes[3]:
1166 if merge and changes[3]:
1198 raise util.Abort(_("cannot commit merge with missing files"))
1167 raise util.Abort(_("cannot commit merge with missing files"))
1199
1168
1200 ms = mergemod.mergestate(self)
1169 ms = mergemod.mergestate(self)
1201 for f in changes[0]:
1170 for f in changes[0]:
1202 if f in ms and ms[f] == 'u':
1171 if f in ms and ms[f] == 'u':
1203 raise util.Abort(_("unresolved merge conflicts "
1172 raise util.Abort(_("unresolved merge conflicts "
1204 "(see hg help resolve)"))
1173 "(see hg help resolve)"))
1205
1174
1206 cctx = context.workingctx(self, text, user, date, extra, changes)
1175 cctx = context.workingctx(self, text, user, date, extra, changes)
1207 if editor:
1176 if editor:
1208 cctx._text = editor(self, cctx, subs)
1177 cctx._text = editor(self, cctx, subs)
1209 edited = (text != cctx._text)
1178 edited = (text != cctx._text)
1210
1179
1211 # commit subs and write new state
1180 # commit subs and write new state
1212 if subs:
1181 if subs:
1213 for s in sorted(commitsubs):
1182 for s in sorted(commitsubs):
1214 sub = wctx.sub(s)
1183 sub = wctx.sub(s)
1215 self.ui.status(_('committing subrepository %s\n') %
1184 self.ui.status(_('committing subrepository %s\n') %
1216 subrepo.subrelpath(sub))
1185 subrepo.subrelpath(sub))
1217 sr = sub.commit(cctx._text, user, date)
1186 sr = sub.commit(cctx._text, user, date)
1218 newstate[s] = (newstate[s][0], sr)
1187 newstate[s] = (newstate[s][0], sr)
1219 subrepo.writestate(self, newstate)
1188 subrepo.writestate(self, newstate)
1220
1189
1221 # Save commit message in case this transaction gets rolled back
1190 # Save commit message in case this transaction gets rolled back
1222 # (e.g. by a pretxncommit hook). Leave the content alone on
1191 # (e.g. by a pretxncommit hook). Leave the content alone on
1223 # the assumption that the user will use the same editor again.
1192 # the assumption that the user will use the same editor again.
1224 msgfn = self.savecommitmessage(cctx._text)
1193 msgfn = self.savecommitmessage(cctx._text)
1225
1194
1226 p1, p2 = self.dirstate.parents()
1195 p1, p2 = self.dirstate.parents()
1227 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1196 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1228 try:
1197 try:
1229 self.hook("precommit", throw=True, parent1=hookp1,
1198 self.hook("precommit", throw=True, parent1=hookp1,
1230 parent2=hookp2)
1199 parent2=hookp2)
1231 ret = self.commitctx(cctx, True)
1200 ret = self.commitctx(cctx, True)
1232 except: # re-raises
1201 except: # re-raises
1233 if edited:
1202 if edited:
1234 self.ui.write(
1203 self.ui.write(
1235 _('note: commit message saved in %s\n') % msgfn)
1204 _('note: commit message saved in %s\n') % msgfn)
1236 raise
1205 raise
1237
1206
1238 # update bookmarks, dirstate and mergestate
1207 # update bookmarks, dirstate and mergestate
1239 bookmarks.update(self, [p1, p2], ret)
1208 bookmarks.update(self, [p1, p2], ret)
1240 for f in changes[0] + changes[1]:
1209 for f in changes[0] + changes[1]:
1241 self.dirstate.normal(f)
1210 self.dirstate.normal(f)
1242 for f in changes[2]:
1211 for f in changes[2]:
1243 self.dirstate.drop(f)
1212 self.dirstate.drop(f)
1244 self.dirstate.setparents(ret)
1213 self.dirstate.setparents(ret)
1245 ms.reset()
1214 ms.reset()
1246 finally:
1215 finally:
1247 wlock.release()
1216 wlock.release()
1248
1217
1249 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1218 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1250 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1219 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1251 self._afterlock(commithook)
1220 self._afterlock(commithook)
1252 return ret
1221 return ret
1253
1222
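As a usage sketch (names and message are placeholders): commit() hands back the new changeset node, or None when there is nothing to commit on the current branch:

    node = repo.commit(text="fix frobnication",
                       user="alice <alice@example.org>")
    if node is None:
        repo.ui.status("nothing changed\n")
    else:
        repo.ui.status("committed %s\n" % hex(node))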
1254 def commitctx(self, ctx, error=False):
1223 def commitctx(self, ctx, error=False):
1255 """Add a new revision to current repository.
1224 """Add a new revision to current repository.
1256 Revision information is passed via the context argument.
1225 Revision information is passed via the context argument.
1257 """
1226 """
1258
1227
1259 tr = lock = None
1228 tr = lock = None
1260 removed = list(ctx.removed())
1229 removed = list(ctx.removed())
1261 p1, p2 = ctx.p1(), ctx.p2()
1230 p1, p2 = ctx.p1(), ctx.p2()
1262 user = ctx.user()
1231 user = ctx.user()
1263
1232
1264 lock = self.lock()
1233 lock = self.lock()
1265 try:
1234 try:
1266 tr = self.transaction("commit")
1235 tr = self.transaction("commit")
1267 trp = weakref.proxy(tr)
1236 trp = weakref.proxy(tr)
1268
1237
1269 if ctx.files():
1238 if ctx.files():
1270 m1 = p1.manifest().copy()
1239 m1 = p1.manifest().copy()
1271 m2 = p2.manifest()
1240 m2 = p2.manifest()
1272
1241
1273 # check in files
1242 # check in files
1274 new = {}
1243 new = {}
1275 changed = []
1244 changed = []
1276 linkrev = len(self)
1245 linkrev = len(self)
1277 for f in sorted(ctx.modified() + ctx.added()):
1246 for f in sorted(ctx.modified() + ctx.added()):
1278 self.ui.note(f + "\n")
1247 self.ui.note(f + "\n")
1279 try:
1248 try:
1280 fctx = ctx[f]
1249 fctx = ctx[f]
1281 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1250 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1282 changed)
1251 changed)
1283 m1.set(f, fctx.flags())
1252 m1.set(f, fctx.flags())
1284 except OSError, inst:
1253 except OSError, inst:
1285 self.ui.warn(_("trouble committing %s!\n") % f)
1254 self.ui.warn(_("trouble committing %s!\n") % f)
1286 raise
1255 raise
1287 except IOError, inst:
1256 except IOError, inst:
1288 errcode = getattr(inst, 'errno', errno.ENOENT)
1257 errcode = getattr(inst, 'errno', errno.ENOENT)
1289 if error or errcode and errcode != errno.ENOENT:
1258 if error or errcode and errcode != errno.ENOENT:
1290 self.ui.warn(_("trouble committing %s!\n") % f)
1259 self.ui.warn(_("trouble committing %s!\n") % f)
1291 raise
1260 raise
1292 else:
1261 else:
1293 removed.append(f)
1262 removed.append(f)
1294
1263
1295 # update manifest
1264 # update manifest
1296 m1.update(new)
1265 m1.update(new)
1297 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1266 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1298 drop = [f for f in removed if f in m1]
1267 drop = [f for f in removed if f in m1]
1299 for f in drop:
1268 for f in drop:
1300 del m1[f]
1269 del m1[f]
1301 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1270 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1302 p2.manifestnode(), (new, drop))
1271 p2.manifestnode(), (new, drop))
1303 files = changed + removed
1272 files = changed + removed
1304 else:
1273 else:
1305 mn = p1.manifestnode()
1274 mn = p1.manifestnode()
1306 files = []
1275 files = []
1307
1276
1308 # update changelog
1277 # update changelog
1309 self.changelog.delayupdate()
1278 self.changelog.delayupdate()
1310 n = self.changelog.add(mn, files, ctx.description(),
1279 n = self.changelog.add(mn, files, ctx.description(),
1311 trp, p1.node(), p2.node(),
1280 trp, p1.node(), p2.node(),
1312 user, ctx.date(), ctx.extra().copy())
1281 user, ctx.date(), ctx.extra().copy())
1313 p = lambda: self.changelog.writepending() and self.root or ""
1282 p = lambda: self.changelog.writepending() and self.root or ""
1314 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1283 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1315 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1284 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1316 parent2=xp2, pending=p)
1285 parent2=xp2, pending=p)
1317 self.changelog.finalize(trp)
1286 self.changelog.finalize(trp)
1318 # set the new commit in its proper phase
1287 # set the new commit in its proper phase
1319 targetphase = phases.newcommitphase(self.ui)
1288 targetphase = phases.newcommitphase(self.ui)
1320 if targetphase:
1289 if targetphase:
1321 # retract boundary do not alter parent changeset.
1290 # retract boundary do not alter parent changeset.
1322 # if a parent have higher the resulting phase will
1291 # if a parent have higher the resulting phase will
1323 # be compliant anyway
1292 # be compliant anyway
1324 #
1293 #
1325 # if minimal phase was 0 we don't need to retract anything
1294 # if minimal phase was 0 we don't need to retract anything
1326 phases.retractboundary(self, targetphase, [n])
1295 phases.retractboundary(self, targetphase, [n])
1327 tr.close()
1296 tr.close()
1328 self.updatebranchcache()
1297 self.updatebranchcache()
1329 return n
1298 return n
1330 finally:
1299 finally:
1331 if tr:
1300 if tr:
1332 tr.release()
1301 tr.release()
1333 lock.release()
1302 lock.release()
1334
1303
1335 def destroyed(self, newheadrevs=None):
1304 def destroyed(self):
1336 '''Inform the repository that nodes have been destroyed.
1305 '''Inform the repository that nodes have been destroyed.
1337 Intended for use by strip and rollback, so there's a common
1306 Intended for use by strip and rollback, so there's a common
1338 place for anything that has to be done after destroying history.
1307 place for anything that has to be done after destroying history.'''
1339
1308 # XXX it might be nice if we could take the list of destroyed
1340 If you know the branchhead cache was up to date before nodes were removed
1309 # nodes, but I don't see an easy way for rollback() to do that
1341 and you also know the candidate set of new heads that may have
1342 resulted from the destruction, you can set newheadrevs. This will
1343 enable the code to update the branchheads cache, rather than having
1344 future code decide it is invalid and regenerate it.
1345 '''
1346 if newheadrevs:
1347 tiprev = len(self) - 1
1348 ctxgen = (self[rev] for rev in newheadrevs)
1349 self._updatebranchcache(self._branchcache, ctxgen)
1350 self._writebranchcache(self._branchcache, self.changelog.tip(),
1351 tiprev)
1352 else:
1353 # No info to update the cache. If nodes were destroyed, the cache
1354 # is stale and this will be caught the next time it is read.
1355 pass
1356
1310
1357 # Ensure the persistent tag cache is updated. Doing it now
1311 # Ensure the persistent tag cache is updated. Doing it now
1358 # means that the tag cache only has to worry about destroyed
1312 # means that the tag cache only has to worry about destroyed
1359 # heads immediately after a strip/rollback. That in turn
1313 # heads immediately after a strip/rollback. That in turn
1360 # guarantees that "cachetip == currenttip" (comparing both rev
1314 # guarantees that "cachetip == currenttip" (comparing both rev
1361 # and node) always means no nodes have been added or destroyed.
1315 # and node) always means no nodes have been added or destroyed.
1362
1316
1363 # XXX this is suboptimal when qrefresh'ing: we strip the current
1317 # XXX this is suboptimal when qrefresh'ing: we strip the current
1364 # head, refresh the tag cache, then immediately add a new head.
1318 # head, refresh the tag cache, then immediately add a new head.
1365 # But I think doing it this way is necessary for the "instant
1319 # But I think doing it this way is necessary for the "instant
1366 # tag cache retrieval" case to work.
1320 # tag cache retrieval" case to work.
1367 self.invalidatecaches()
1321 self.invalidatecaches()
1368
1322
1369 # Discard all cache entries to force reloading everything.
1323 # Discard all cache entries to force reloading everything.
1370 self._filecache.clear()
1324 self._filecache.clear()
1371
1325
1372 def walk(self, match, node=None):
1326 def walk(self, match, node=None):
1373 '''
1327 '''
1374 walk recursively through the directory tree or a given
1328 walk recursively through the directory tree or a given
1375 changeset, finding all files matched by the match
1329 changeset, finding all files matched by the match
1376 function
1330 function
1377 '''
1331 '''
1378 return self[node].walk(match)
1332 return self[node].walk(match)
1379
1333
1380 def status(self, node1='.', node2=None, match=None,
1334 def status(self, node1='.', node2=None, match=None,
1381 ignored=False, clean=False, unknown=False,
1335 ignored=False, clean=False, unknown=False,
1382 listsubrepos=False):
1336 listsubrepos=False):
1383 """return status of files between two nodes or node and working
1337 """return status of files between two nodes or node and working
1384 directory.
1338 directory.
1385
1339
1386 If node1 is None, use the first dirstate parent instead.
1340 If node1 is None, use the first dirstate parent instead.
1387 If node2 is None, compare node1 with working directory.
1341 If node2 is None, compare node1 with working directory.
1388 """
1342 """
1389
1343
1390 def mfmatches(ctx):
1344 def mfmatches(ctx):
1391 mf = ctx.manifest().copy()
1345 mf = ctx.manifest().copy()
1392 if match.always():
1346 if match.always():
1393 return mf
1347 return mf
1394 for fn in mf.keys():
1348 for fn in mf.keys():
1395 if not match(fn):
1349 if not match(fn):
1396 del mf[fn]
1350 del mf[fn]
1397 return mf
1351 return mf
1398
1352
1399 if isinstance(node1, context.changectx):
1353 if isinstance(node1, context.changectx):
1400 ctx1 = node1
1354 ctx1 = node1
1401 else:
1355 else:
1402 ctx1 = self[node1]
1356 ctx1 = self[node1]
1403 if isinstance(node2, context.changectx):
1357 if isinstance(node2, context.changectx):
1404 ctx2 = node2
1358 ctx2 = node2
1405 else:
1359 else:
1406 ctx2 = self[node2]
1360 ctx2 = self[node2]
1407
1361
1408 working = ctx2.rev() is None
1362 working = ctx2.rev() is None
1409 parentworking = working and ctx1 == self['.']
1363 parentworking = working and ctx1 == self['.']
1410 match = match or matchmod.always(self.root, self.getcwd())
1364 match = match or matchmod.always(self.root, self.getcwd())
1411 listignored, listclean, listunknown = ignored, clean, unknown
1365 listignored, listclean, listunknown = ignored, clean, unknown
1412
1366
1413 # load earliest manifest first for caching reasons
1367 # load earliest manifest first for caching reasons
1414 if not working and ctx2.rev() < ctx1.rev():
1368 if not working and ctx2.rev() < ctx1.rev():
1415 ctx2.manifest()
1369 ctx2.manifest()
1416
1370
1417 if not parentworking:
1371 if not parentworking:
1418 def bad(f, msg):
1372 def bad(f, msg):
1419 # 'f' may be a directory pattern from 'match.files()',
1373 # 'f' may be a directory pattern from 'match.files()',
1420 # so 'f not in ctx1' is not enough
1374 # so 'f not in ctx1' is not enough
1421 if f not in ctx1 and f not in ctx1.dirs():
1375 if f not in ctx1 and f not in ctx1.dirs():
1422 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1376 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1423 match.bad = bad
1377 match.bad = bad
1424
1378
1425 if working: # we need to scan the working dir
1379 if working: # we need to scan the working dir
1426 subrepos = []
1380 subrepos = []
1427 if '.hgsub' in self.dirstate:
1381 if '.hgsub' in self.dirstate:
1428 subrepos = ctx2.substate.keys()
1382 subrepos = ctx2.substate.keys()
1429 s = self.dirstate.status(match, subrepos, listignored,
1383 s = self.dirstate.status(match, subrepos, listignored,
1430 listclean, listunknown)
1384 listclean, listunknown)
1431 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1385 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1432
1386
1433 # check for any possibly clean files
1387 # check for any possibly clean files
1434 if parentworking and cmp:
1388 if parentworking and cmp:
1435 fixup = []
1389 fixup = []
1436 # do a full compare of any files that might have changed
1390 # do a full compare of any files that might have changed
1437 for f in sorted(cmp):
1391 for f in sorted(cmp):
1438 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1392 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1439 or ctx1[f].cmp(ctx2[f])):
1393 or ctx1[f].cmp(ctx2[f])):
1440 modified.append(f)
1394 modified.append(f)
1441 else:
1395 else:
1442 fixup.append(f)
1396 fixup.append(f)
1443
1397
1444 # update dirstate for files that are actually clean
1398 # update dirstate for files that are actually clean
1445 if fixup:
1399 if fixup:
1446 if listclean:
1400 if listclean:
1447 clean += fixup
1401 clean += fixup
1448
1402
1449 try:
1403 try:
1450 # updating the dirstate is optional
1404 # updating the dirstate is optional
1451 # so we don't wait on the lock
1405 # so we don't wait on the lock
1452 wlock = self.wlock(False)
1406 wlock = self.wlock(False)
1453 try:
1407 try:
1454 for f in fixup:
1408 for f in fixup:
1455 self.dirstate.normal(f)
1409 self.dirstate.normal(f)
1456 finally:
1410 finally:
1457 wlock.release()
1411 wlock.release()
1458 except error.LockError:
1412 except error.LockError:
1459 pass
1413 pass
1460
1414
1461 if not parentworking:
1415 if not parentworking:
1462 mf1 = mfmatches(ctx1)
1416 mf1 = mfmatches(ctx1)
1463 if working:
1417 if working:
1464 # we are comparing working dir against non-parent
1418 # we are comparing working dir against non-parent
1465 # generate a pseudo-manifest for the working dir
1419 # generate a pseudo-manifest for the working dir
1466 mf2 = mfmatches(self['.'])
1420 mf2 = mfmatches(self['.'])
1467 for f in cmp + modified + added:
1421 for f in cmp + modified + added:
1468 mf2[f] = None
1422 mf2[f] = None
1469 mf2.set(f, ctx2.flags(f))
1423 mf2.set(f, ctx2.flags(f))
1470 for f in removed:
1424 for f in removed:
1471 if f in mf2:
1425 if f in mf2:
1472 del mf2[f]
1426 del mf2[f]
1473 else:
1427 else:
1474 # we are comparing two revisions
1428 # we are comparing two revisions
1475 deleted, unknown, ignored = [], [], []
1429 deleted, unknown, ignored = [], [], []
1476 mf2 = mfmatches(ctx2)
1430 mf2 = mfmatches(ctx2)
1477
1431
1478 modified, added, clean = [], [], []
1432 modified, added, clean = [], [], []
1479 withflags = mf1.withflags() | mf2.withflags()
1433 withflags = mf1.withflags() | mf2.withflags()
1480 for fn in mf2:
1434 for fn in mf2:
1481 if fn in mf1:
1435 if fn in mf1:
1482 if (fn not in deleted and
1436 if (fn not in deleted and
1483 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1437 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1484 (mf1[fn] != mf2[fn] and
1438 (mf1[fn] != mf2[fn] and
1485 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1439 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1486 modified.append(fn)
1440 modified.append(fn)
1487 elif listclean:
1441 elif listclean:
1488 clean.append(fn)
1442 clean.append(fn)
1489 del mf1[fn]
1443 del mf1[fn]
1490 elif fn not in deleted:
1444 elif fn not in deleted:
1491 added.append(fn)
1445 added.append(fn)
1492 removed = mf1.keys()
1446 removed = mf1.keys()
1493
1447
1494 if working and modified and not self.dirstate._checklink:
1448 if working and modified and not self.dirstate._checklink:
1495 # Symlink placeholders may get non-symlink-like contents
1449 # Symlink placeholders may get non-symlink-like contents
1496 # via user error or dereferencing by NFS or Samba servers,
1450 # via user error or dereferencing by NFS or Samba servers,
1497 # so we filter out any placeholders that don't look like a
1451 # so we filter out any placeholders that don't look like a
1498 # symlink
1452 # symlink
1499 sane = []
1453 sane = []
1500 for f in modified:
1454 for f in modified:
1501 if ctx2.flags(f) == 'l':
1455 if ctx2.flags(f) == 'l':
1502 d = ctx2[f].data()
1456 d = ctx2[f].data()
1503 if len(d) >= 1024 or '\n' in d or util.binary(d):
1457 if len(d) >= 1024 or '\n' in d or util.binary(d):
1504 self.ui.debug('ignoring suspect symlink placeholder'
1458 self.ui.debug('ignoring suspect symlink placeholder'
1505 ' "%s"\n' % f)
1459 ' "%s"\n' % f)
1506 continue
1460 continue
1507 sane.append(f)
1461 sane.append(f)
1508 modified = sane
1462 modified = sane
1509
1463
1510 r = modified, added, removed, deleted, unknown, ignored, clean
1464 r = modified, added, removed, deleted, unknown, ignored, clean
1511
1465
1512 if listsubrepos:
1466 if listsubrepos:
1513 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1467 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1514 if working:
1468 if working:
1515 rev2 = None
1469 rev2 = None
1516 else:
1470 else:
1517 rev2 = ctx2.substate[subpath][1]
1471 rev2 = ctx2.substate[subpath][1]
1518 try:
1472 try:
1519 submatch = matchmod.narrowmatcher(subpath, match)
1473 submatch = matchmod.narrowmatcher(subpath, match)
1520 s = sub.status(rev2, match=submatch, ignored=listignored,
1474 s = sub.status(rev2, match=submatch, ignored=listignored,
1521 clean=listclean, unknown=listunknown,
1475 clean=listclean, unknown=listunknown,
1522 listsubrepos=True)
1476 listsubrepos=True)
1523 for rfiles, sfiles in zip(r, s):
1477 for rfiles, sfiles in zip(r, s):
1524 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1478 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1525 except error.LookupError:
1479 except error.LookupError:
1526 self.ui.status(_("skipping missing subrepository: %s\n")
1480 self.ui.status(_("skipping missing subrepository: %s\n")
1527 % subpath)
1481 % subpath)
1528
1482
1529 for l in r:
1483 for l in r:
1530 l.sort()
1484 l.sort()
1531 return r
1485 return r
1532
1486
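The seven lists assembled above come back in a fixed order, so callers usually unpack them positionally; an illustrative sketch:

    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(ignored=True, clean=True, unknown=True)
    for f in modified:
        repo.ui.write("M %s\n" % f)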
1533 def heads(self, start=None):
1487 def heads(self, start=None):
1534 heads = self.changelog.heads(start)
1488 heads = self.changelog.heads(start)
1535 # sort the output in rev descending order
1489 # sort the output in rev descending order
1536 return sorted(heads, key=self.changelog.rev, reverse=True)
1490 return sorted(heads, key=self.changelog.rev, reverse=True)
1537
1491
1538 def branchheads(self, branch=None, start=None, closed=False):
1492 def branchheads(self, branch=None, start=None, closed=False):
1539 '''return a (possibly filtered) list of heads for the given branch
1493 '''return a (possibly filtered) list of heads for the given branch
1540
1494
1541 Heads are returned in topological order, from newest to oldest.
1495 Heads are returned in topological order, from newest to oldest.
1542 If branch is None, use the dirstate branch.
1496 If branch is None, use the dirstate branch.
1543 If start is not None, return only heads reachable from start.
1497 If start is not None, return only heads reachable from start.
1544 If closed is True, return heads that are marked as closed as well.
1498 If closed is True, return heads that are marked as closed as well.
1545 '''
1499 '''
1546 if branch is None:
1500 if branch is None:
1547 branch = self[None].branch()
1501 branch = self[None].branch()
1548 branches = self.branchmap()
1502 branches = self.branchmap()
1549 if branch not in branches:
1503 if branch not in branches:
1550 return []
1504 return []
1551 # the cache returns heads ordered lowest to highest
1505 # the cache returns heads ordered lowest to highest
1552 bheads = list(reversed(branches[branch]))
1506 bheads = list(reversed(branches[branch]))
1553 if start is not None:
1507 if start is not None:
1554 # filter out the heads that cannot be reached from startrev
1508 # filter out the heads that cannot be reached from startrev
1555 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1509 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1556 bheads = [h for h in bheads if h in fbheads]
1510 bheads = [h for h in bheads if h in fbheads]
1557 if not closed:
1511 if not closed:
1558 bheads = [h for h in bheads if not self[h].closesbranch()]
1512 bheads = [h for h in bheads if not self[h].closesbranch()]
1559 return bheads
1513 return bheads
1560
1514
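An illustrative call under the semantics documented above (heads returned newest to oldest, closed heads filtered out unless requested):

    for h in repo.branchheads('default', closed=False):
        repo.ui.write("%s\n" % hex(h))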
1561 def branches(self, nodes):
1515 def branches(self, nodes):
1562 if not nodes:
1516 if not nodes:
1563 nodes = [self.changelog.tip()]
1517 nodes = [self.changelog.tip()]
1564 b = []
1518 b = []
1565 for n in nodes:
1519 for n in nodes:
1566 t = n
1520 t = n
1567 while True:
1521 while True:
1568 p = self.changelog.parents(n)
1522 p = self.changelog.parents(n)
1569 if p[1] != nullid or p[0] == nullid:
1523 if p[1] != nullid or p[0] == nullid:
1570 b.append((t, n, p[0], p[1]))
1524 b.append((t, n, p[0], p[1]))
1571 break
1525 break
1572 n = p[0]
1526 n = p[0]
1573 return b
1527 return b
1574
1528
1575 def between(self, pairs):
1529 def between(self, pairs):
1576 r = []
1530 r = []
1577
1531
1578 for top, bottom in pairs:
1532 for top, bottom in pairs:
1579 n, l, i = top, [], 0
1533 n, l, i = top, [], 0
1580 f = 1
1534 f = 1
1581
1535
1582 while n != bottom and n != nullid:
1536 while n != bottom and n != nullid:
1583 p = self.changelog.parents(n)[0]
1537 p = self.changelog.parents(n)[0]
1584 if i == f:
1538 if i == f:
1585 l.append(n)
1539 l.append(n)
1586 f = f * 2
1540 f = f * 2
1587 n = p
1541 n = p
1588 i += 1
1542 i += 1
1589
1543
1590 r.append(l)
1544 r.append(l)
1591
1545
1592 return r
1546 return r
1593
1547
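between() walks first parents from each top towards the corresponding bottom and keeps only the nodes found at exponentially growing distances (1, 2, 4, 8, ...), so the reply stays small even for long ranges. A sketch, with placeholder nodes:

    # for a linear run top -> ... -> bottom, samples holds the ancestors of
    # top at distances 1, 2, 4, 8, ... reached before hitting bottom
    samples = repo.between([(topnode, bottomnode)])[0]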
1594 def pull(self, remote, heads=None, force=False):
1548 def pull(self, remote, heads=None, force=False):
1595 lock = self.lock()
1549 lock = self.lock()
1596 try:
1550 try:
1597 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1551 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1598 force=force)
1552 force=force)
1599 common, fetch, rheads = tmp
1553 common, fetch, rheads = tmp
1600 if not fetch:
1554 if not fetch:
1601 self.ui.status(_("no changes found\n"))
1555 self.ui.status(_("no changes found\n"))
1602 added = []
1556 added = []
1603 result = 0
1557 result = 0
1604 else:
1558 else:
1605 if heads is None and list(common) == [nullid]:
1559 if heads is None and list(common) == [nullid]:
1606 self.ui.status(_("requesting all changes\n"))
1560 self.ui.status(_("requesting all changes\n"))
1607 elif heads is None and remote.capable('changegroupsubset'):
1561 elif heads is None and remote.capable('changegroupsubset'):
1608 # issue1320, avoid a race if remote changed after discovery
1562 # issue1320, avoid a race if remote changed after discovery
1609 heads = rheads
1563 heads = rheads
1610
1564
1611 if remote.capable('getbundle'):
1565 if remote.capable('getbundle'):
1612 cg = remote.getbundle('pull', common=common,
1566 cg = remote.getbundle('pull', common=common,
1613 heads=heads or rheads)
1567 heads=heads or rheads)
1614 elif heads is None:
1568 elif heads is None:
1615 cg = remote.changegroup(fetch, 'pull')
1569 cg = remote.changegroup(fetch, 'pull')
1616 elif not remote.capable('changegroupsubset'):
1570 elif not remote.capable('changegroupsubset'):
1617 raise util.Abort(_("partial pull cannot be done because "
1571 raise util.Abort(_("partial pull cannot be done because "
1618 "other repository doesn't support "
1572 "other repository doesn't support "
1619 "changegroupsubset."))
1573 "changegroupsubset."))
1620 else:
1574 else:
1621 cg = remote.changegroupsubset(fetch, heads, 'pull')
1575 cg = remote.changegroupsubset(fetch, heads, 'pull')
1622 clstart = len(self.changelog)
1576 clstart = len(self.changelog)
1623 result = self.addchangegroup(cg, 'pull', remote.url())
1577 result = self.addchangegroup(cg, 'pull', remote.url())
1624 clend = len(self.changelog)
1578 clend = len(self.changelog)
1625 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1579 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1626
1580
1627 # compute target subset
1581 # compute target subset
1628 if heads is None:
1582 if heads is None:
1629 # We pulled everything possible
1583 # We pulled everything possible
1630 # sync on everything common
1584 # sync on everything common
1631 subset = common + added
1585 subset = common + added
1632 else:
1586 else:
1633 # We pulled a specific subset
1587 # We pulled a specific subset
1634 # sync on this subset
1588 # sync on this subset
1635 subset = heads
1589 subset = heads
1636
1590
1637 # Get remote phases data from remote
1591 # Get remote phases data from remote
1638 remotephases = remote.listkeys('phases')
1592 remotephases = remote.listkeys('phases')
1639 publishing = bool(remotephases.get('publishing', False))
1593 publishing = bool(remotephases.get('publishing', False))
1640 if remotephases and not publishing:
1594 if remotephases and not publishing:
1641 # remote is new and unpublishing
1595 # remote is new and unpublishing
1642 pheads, _dr = phases.analyzeremotephases(self, subset,
1596 pheads, _dr = phases.analyzeremotephases(self, subset,
1643 remotephases)
1597 remotephases)
1644 phases.advanceboundary(self, phases.public, pheads)
1598 phases.advanceboundary(self, phases.public, pheads)
1645 phases.advanceboundary(self, phases.draft, subset)
1599 phases.advanceboundary(self, phases.draft, subset)
1646 else:
1600 else:
1647 # Remote is old or publishing all common changesets
1601 # Remote is old or publishing all common changesets
1648 # should be seen as public
1602 # should be seen as public
1649 phases.advanceboundary(self, phases.public, subset)
1603 phases.advanceboundary(self, phases.public, subset)
1650 finally:
1604 finally:
1651 lock.release()
1605 lock.release()
1652
1606
1653 return result
1607 return result
1654
1608
1655 def checkpush(self, force, revs):
1609 def checkpush(self, force, revs):
1656 """Extensions can override this function if additional checks have
1610 """Extensions can override this function if additional checks have
1657 to be performed before pushing, or call it if they override push
1611 to be performed before pushing, or call it if they override push
1658 command.
1612 command.
1659 """
1613 """
1660 pass
1614 pass
1661
1615
1662 def push(self, remote, force=False, revs=None, newbranch=False):
1616 def push(self, remote, force=False, revs=None, newbranch=False):
1663 '''Push outgoing changesets (limited by revs) from the current
1617 '''Push outgoing changesets (limited by revs) from the current
1664 repository to remote. Return an integer:
1618 repository to remote. Return an integer:
1665 - None means nothing to push
1619 - None means nothing to push
1666 - 0 means HTTP error
1620 - 0 means HTTP error
1667 - 1 means we pushed and remote head count is unchanged *or*
1621 - 1 means we pushed and remote head count is unchanged *or*
1668 we have outgoing changesets but refused to push
1622 we have outgoing changesets but refused to push
1669 - other values as described by addchangegroup()
1623 - other values as described by addchangegroup()
1670 '''
1624 '''
1671 # there are two ways to push to remote repo:
1625 # there are two ways to push to remote repo:
1672 #
1626 #
1673 # addchangegroup assumes local user can lock remote
1627 # addchangegroup assumes local user can lock remote
1674 # repo (local filesystem, old ssh servers).
1628 # repo (local filesystem, old ssh servers).
1675 #
1629 #
1676 # unbundle assumes local user cannot lock remote repo (new ssh
1630 # unbundle assumes local user cannot lock remote repo (new ssh
1677 # servers, http servers).
1631 # servers, http servers).
1678
1632
1679 # get local lock as we might write phase data
1633 # get local lock as we might write phase data
1680 locallock = self.lock()
1634 locallock = self.lock()
1681 try:
1635 try:
1682 self.checkpush(force, revs)
1636 self.checkpush(force, revs)
1683 lock = None
1637 lock = None
1684 unbundle = remote.capable('unbundle')
1638 unbundle = remote.capable('unbundle')
1685 if not unbundle:
1639 if not unbundle:
1686 lock = remote.lock()
1640 lock = remote.lock()
1687 try:
1641 try:
1688 # discovery
1642 # discovery
1689 fci = discovery.findcommonincoming
1643 fci = discovery.findcommonincoming
1690 commoninc = fci(self, remote, force=force)
1644 commoninc = fci(self, remote, force=force)
1691 common, inc, remoteheads = commoninc
1645 common, inc, remoteheads = commoninc
1692 fco = discovery.findcommonoutgoing
1646 fco = discovery.findcommonoutgoing
1693 outgoing = fco(self, remote, onlyheads=revs,
1647 outgoing = fco(self, remote, onlyheads=revs,
1694 commoninc=commoninc, force=force)
1648 commoninc=commoninc, force=force)
1695
1649
1696
1650
1697 if not outgoing.missing:
1651 if not outgoing.missing:
1698 # nothing to push
1652 # nothing to push
1699 scmutil.nochangesfound(self.ui, outgoing.excluded)
1653 scmutil.nochangesfound(self.ui, outgoing.excluded)
1700 ret = None
1654 ret = None
1701 else:
1655 else:
1702 # something to push
1656 # something to push
1703 if not force:
1657 if not force:
1704 discovery.checkheads(self, remote, outgoing,
1658 discovery.checkheads(self, remote, outgoing,
1705 remoteheads, newbranch,
1659 remoteheads, newbranch,
1706 bool(inc))
1660 bool(inc))
1707
1661
1708 # create a changegroup from local
1662 # create a changegroup from local
1709 if revs is None and not outgoing.excluded:
1663 if revs is None and not outgoing.excluded:
1710 # push everything,
1664 # push everything,
1711 # use the fast path, no race possible on push
1665 # use the fast path, no race possible on push
1712 cg = self._changegroup(outgoing.missing, 'push')
1666 cg = self._changegroup(outgoing.missing, 'push')
1713 else:
1667 else:
1714 cg = self.getlocalbundle('push', outgoing)
1668 cg = self.getlocalbundle('push', outgoing)
1715
1669
1716 # apply changegroup to remote
1670 # apply changegroup to remote
1717 if unbundle:
1671 if unbundle:
1718 # local repo finds heads on server, finds out what
1672 # local repo finds heads on server, finds out what
1719 # revs it must push. once revs transferred, if server
1673 # revs it must push. once revs transferred, if server
1720 # finds it has different heads (someone else won
1674 # finds it has different heads (someone else won
1721 # commit/push race), server aborts.
1675 # commit/push race), server aborts.
1722 if force:
1676 if force:
1723 remoteheads = ['force']
1677 remoteheads = ['force']
1724 # ssh: return remote's addchangegroup()
1678 # ssh: return remote's addchangegroup()
1725 # http: return remote's addchangegroup() or 0 for error
1679 # http: return remote's addchangegroup() or 0 for error
1726 ret = remote.unbundle(cg, remoteheads, 'push')
1680 ret = remote.unbundle(cg, remoteheads, 'push')
1727 else:
1681 else:
1728 # we return an integer indicating remote head count
1682 # we return an integer indicating remote head count
1729 # change
1683 # change
1730 ret = remote.addchangegroup(cg, 'push', self.url())
1684 ret = remote.addchangegroup(cg, 'push', self.url())
1731
1685
1732 if ret:
1686 if ret:
1733 # push succeeded, synchronize the target of the push
1687 # push succeeded, synchronize the target of the push
1734 cheads = outgoing.missingheads
1688 cheads = outgoing.missingheads
1735 elif revs is None:
1689 elif revs is None:
1736 # all-out push failed; synchronize all common
1690 # all-out push failed; synchronize all common
1737 cheads = outgoing.commonheads
1691 cheads = outgoing.commonheads
1738 else:
1692 else:
1739 # I want cheads = heads(::missingheads and ::commonheads)
1693 # I want cheads = heads(::missingheads and ::commonheads)
1740 # (missingheads is revs with secret changesets filtered out)
1694 # (missingheads is revs with secret changesets filtered out)
1741 #
1695 #
1742 # This can be expressed as:
1696 # This can be expressed as:
1743 # cheads = ( (missingheads and ::commonheads)
1697 # cheads = ( (missingheads and ::commonheads)
1744 # + (commonheads and ::missingheads))"
1698 # + (commonheads and ::missingheads))"
1745 # )
1699 # )
1746 #
1700 #
1747 # while trying to push we already computed the following:
1701 # while trying to push we already computed the following:
1748 # common = (::commonheads)
1702 # common = (::commonheads)
1749 # missing = ((commonheads::missingheads) - commonheads)
1703 # missing = ((commonheads::missingheads) - commonheads)
1750 #
1704 #
1751 # We can pick:
1705 # We can pick:
1752 # * missingheads part of common (::commonheads)
1706 # * missingheads part of common (::commonheads)
1753 common = set(outgoing.common)
1707 common = set(outgoing.common)
1754 cheads = [node for node in revs if node in common]
1708 cheads = [node for node in revs if node in common]
1755 # and
1709 # and
1756 # * commonheads parents on missing
1710 # * commonheads parents on missing
1757 revset = self.set('%ln and parents(roots(%ln))',
1711 revset = self.set('%ln and parents(roots(%ln))',
1758 outgoing.commonheads,
1712 outgoing.commonheads,
1759 outgoing.missing)
1713 outgoing.missing)
1760 cheads.extend(c.node() for c in revset)
1714 cheads.extend(c.node() for c in revset)
1761 # even when we don't push, exchanging phase data is useful
1715 # even when we don't push, exchanging phase data is useful
1762 remotephases = remote.listkeys('phases')
1716 remotephases = remote.listkeys('phases')
1763 if not remotephases: # old server or public only repo
1717 if not remotephases: # old server or public only repo
1764 phases.advanceboundary(self, phases.public, cheads)
1718 phases.advanceboundary(self, phases.public, cheads)
1765 # don't push any phase data as there is nothing to push
1719 # don't push any phase data as there is nothing to push
1766 else:
1720 else:
1767 ana = phases.analyzeremotephases(self, cheads, remotephases)
1721 ana = phases.analyzeremotephases(self, cheads, remotephases)
1768 pheads, droots = ana
1722 pheads, droots = ana
1769 ### Apply remote phase on local
1723 ### Apply remote phase on local
1770 if remotephases.get('publishing', False):
1724 if remotephases.get('publishing', False):
1771 phases.advanceboundary(self, phases.public, cheads)
1725 phases.advanceboundary(self, phases.public, cheads)
1772 else: # publish = False
1726 else: # publish = False
1773 phases.advanceboundary(self, phases.public, pheads)
1727 phases.advanceboundary(self, phases.public, pheads)
1774 phases.advanceboundary(self, phases.draft, cheads)
1728 phases.advanceboundary(self, phases.draft, cheads)
1775 ### Apply local phase on remote
1729 ### Apply local phase on remote
1776
1730
1777 # Get the list of all revs that are draft on remote but public here.
1731 # Get the list of all revs that are draft on remote but public here.
1778 # XXX Beware that the revset breaks if droots is not strictly
1732 # XXX Beware that the revset breaks if droots is not strictly
1779 # XXX roots; we may want to ensure it is, but that is costly
1733 # XXX roots; we may want to ensure it is, but that is costly
1780 outdated = self.set('heads((%ln::%ln) and public())',
1734 outdated = self.set('heads((%ln::%ln) and public())',
1781 droots, cheads)
1735 droots, cheads)
1782 for newremotehead in outdated:
1736 for newremotehead in outdated:
1783 r = remote.pushkey('phases',
1737 r = remote.pushkey('phases',
1784 newremotehead.hex(),
1738 newremotehead.hex(),
1785 str(phases.draft),
1739 str(phases.draft),
1786 str(phases.public))
1740 str(phases.public))
1787 if not r:
1741 if not r:
1788 self.ui.warn(_('updating %s to public failed!\n')
1742 self.ui.warn(_('updating %s to public failed!\n')
1789 % newremotehead)
1743 % newremotehead)
1790 finally:
1744 finally:
1791 if lock is not None:
1745 if lock is not None:
1792 lock.release()
1746 lock.release()
1793 finally:
1747 finally:
1794 locallock.release()
1748 locallock.release()
1795
1749
1796 self.ui.debug("checking for updated bookmarks\n")
1750 self.ui.debug("checking for updated bookmarks\n")
1797 rb = remote.listkeys('bookmarks')
1751 rb = remote.listkeys('bookmarks')
1798 for k in rb.keys():
1752 for k in rb.keys():
1799 if k in self._bookmarks:
1753 if k in self._bookmarks:
1800 nr, nl = rb[k], hex(self._bookmarks[k])
1754 nr, nl = rb[k], hex(self._bookmarks[k])
1801 if nr in self:
1755 if nr in self:
1802 cr = self[nr]
1756 cr = self[nr]
1803 cl = self[nl]
1757 cl = self[nl]
1804 if cl in cr.descendants():
1758 if cl in cr.descendants():
1805 r = remote.pushkey('bookmarks', k, nr, nl)
1759 r = remote.pushkey('bookmarks', k, nr, nl)
1806 if r:
1760 if r:
1807 self.ui.status(_("updating bookmark %s\n") % k)
1761 self.ui.status(_("updating bookmark %s\n") % k)
1808 else:
1762 else:
1809 self.ui.warn(_('updating bookmark %s'
1763 self.ui.warn(_('updating bookmark %s'
1810 ' failed!\n') % k)
1764 ' failed!\n') % k)
1811
1765
1812 return ret
1766 return ret
1813
1767
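Given the return contract in push()'s docstring, a caller can distinguish the outcomes roughly as follows (sketch only; remote is a peer repository object):

    ret = repo.push(remote)
    if ret is None:
        repo.ui.status("nothing to push\n")
    elif ret == 0:
        repo.ui.warn("push failed (HTTP error)\n")
    # any other value: changesets were sent (or the push was refused);
    # see addchangegroup() for the meaning of the head-count result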
1814 def changegroupinfo(self, nodes, source):
1768 def changegroupinfo(self, nodes, source):
1815 if self.ui.verbose or source == 'bundle':
1769 if self.ui.verbose or source == 'bundle':
1816 self.ui.status(_("%d changesets found\n") % len(nodes))
1770 self.ui.status(_("%d changesets found\n") % len(nodes))
1817 if self.ui.debugflag:
1771 if self.ui.debugflag:
1818 self.ui.debug("list of changesets:\n")
1772 self.ui.debug("list of changesets:\n")
1819 for node in nodes:
1773 for node in nodes:
1820 self.ui.debug("%s\n" % hex(node))
1774 self.ui.debug("%s\n" % hex(node))
1821
1775
1822 def changegroupsubset(self, bases, heads, source):
1776 def changegroupsubset(self, bases, heads, source):
1823 """Compute a changegroup consisting of all the nodes that are
1777 """Compute a changegroup consisting of all the nodes that are
1824 descendants of any of the bases and ancestors of any of the heads.
1778 descendants of any of the bases and ancestors of any of the heads.
1825 Return a chunkbuffer object whose read() method will return
1779 Return a chunkbuffer object whose read() method will return
1826 successive changegroup chunks.
1780 successive changegroup chunks.
1827
1781
1828 It is fairly complex as determining which filenodes and which
1782 It is fairly complex as determining which filenodes and which
1829 manifest nodes need to be included for the changeset to be complete
1783 manifest nodes need to be included for the changeset to be complete
1830 is non-trivial.
1784 is non-trivial.
1831
1785
1832 Another wrinkle is doing the reverse, figuring out which changeset in
1786 Another wrinkle is doing the reverse, figuring out which changeset in
1833 the changegroup a particular filenode or manifestnode belongs to.
1787 the changegroup a particular filenode or manifestnode belongs to.
1834 """
1788 """
1835 cl = self.changelog
1789 cl = self.changelog
1836 if not bases:
1790 if not bases:
1837 bases = [nullid]
1791 bases = [nullid]
1838 csets, bases, heads = cl.nodesbetween(bases, heads)
1792 csets, bases, heads = cl.nodesbetween(bases, heads)
1839 # We assume that all ancestors of bases are known
1793 # We assume that all ancestors of bases are known
1840 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1794 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1841 return self._changegroupsubset(common, csets, heads, source)
1795 return self._changegroupsubset(common, csets, heads, source)
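
As a toy illustration of the bases/heads semantics documented above (node names a, b, c, d are hypothetical), with a linear history a -> b -> c -> d:

    cg = repo.changegroupsubset([b], [d], 'bundle')
    # the resulting changegroup contains b, c and d: the descendants of the
    # base that are also ancestors of the head
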
1842
1796
1843 def getlocalbundle(self, source, outgoing):
1797 def getlocalbundle(self, source, outgoing):
1844 """Like getbundle, but taking a discovery.outgoing as an argument.
1798 """Like getbundle, but taking a discovery.outgoing as an argument.
1845
1799
1846 This is only implemented for local repos and reuses potentially
1800 This is only implemented for local repos and reuses potentially
1847 precomputed sets in outgoing."""
1801 precomputed sets in outgoing."""
1848 if not outgoing.missing:
1802 if not outgoing.missing:
1849 return None
1803 return None
1850 return self._changegroupsubset(outgoing.common,
1804 return self._changegroupsubset(outgoing.common,
1851 outgoing.missing,
1805 outgoing.missing,
1852 outgoing.missingheads,
1806 outgoing.missingheads,
1853 source)
1807 source)
1854
1808
1855 def getbundle(self, source, heads=None, common=None):
1809 def getbundle(self, source, heads=None, common=None):
1856 """Like changegroupsubset, but returns the set difference between the
1810 """Like changegroupsubset, but returns the set difference between the
1857 ancestors of heads and the ancestors of common.
1811 ancestors of heads and the ancestors of common.
1858
1812
1859 If heads is None, use the local heads. If common is None, use [nullid].
1813 If heads is None, use the local heads. If common is None, use [nullid].
1860
1814
1861 The nodes in common might not all be known locally due to the way the
1815 The nodes in common might not all be known locally due to the way the
1862 current discovery protocol works.
1816 current discovery protocol works.
1863 """
1817 """
1864 cl = self.changelog
1818 cl = self.changelog
1865 if common:
1819 if common:
1866 nm = cl.nodemap
1820 nm = cl.nodemap
1867 common = [n for n in common if n in nm]
1821 common = [n for n in common if n in nm]
1868 else:
1822 else:
1869 common = [nullid]
1823 common = [nullid]
1870 if not heads:
1824 if not heads:
1871 heads = cl.heads()
1825 heads = cl.heads()
1872 return self.getlocalbundle(source,
1826 return self.getlocalbundle(source,
1873 discovery.outgoing(cl, common, heads))
1827 discovery.outgoing(cl, common, heads))
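
The set difference described in this docstring can be pictured with a plain revset; a rough, illustrative equivalent (not the code path actually taken):

    # roughly: the bundle covers ancestors(heads) minus ancestors(common)
    missing = repo.set('(::%ln) - (::%ln)', heads, common)
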
1874
1828
1875 def _changegroupsubset(self, commonrevs, csets, heads, source):
1829 def _changegroupsubset(self, commonrevs, csets, heads, source):
1876
1830
1877 cl = self.changelog
1831 cl = self.changelog
1878 mf = self.manifest
1832 mf = self.manifest
1879 mfs = {} # needed manifests
1833 mfs = {} # needed manifests
1880 fnodes = {} # needed file nodes
1834 fnodes = {} # needed file nodes
1881 changedfiles = set()
1835 changedfiles = set()
1882 fstate = ['', {}]
1836 fstate = ['', {}]
1883 count = [0, 0]
1837 count = [0, 0]
1884
1838
1885 # can we go through the fast path?
1839 # can we go through the fast path?
1886 heads.sort()
1840 heads.sort()
1887 if heads == sorted(self.heads()):
1841 if heads == sorted(self.heads()):
1888 return self._changegroup(csets, source)
1842 return self._changegroup(csets, source)
1889
1843
1890 # slow path
1844 # slow path
1891 self.hook('preoutgoing', throw=True, source=source)
1845 self.hook('preoutgoing', throw=True, source=source)
1892 self.changegroupinfo(csets, source)
1846 self.changegroupinfo(csets, source)
1893
1847
1894 # filter any nodes that claim to be part of the known set
1848 # filter any nodes that claim to be part of the known set
1895 def prune(revlog, missing):
1849 def prune(revlog, missing):
1896 rr, rl = revlog.rev, revlog.linkrev
1850 rr, rl = revlog.rev, revlog.linkrev
1897 return [n for n in missing
1851 return [n for n in missing
1898 if rl(rr(n)) not in commonrevs]
1852 if rl(rr(n)) not in commonrevs]
1899
1853
1900 progress = self.ui.progress
1854 progress = self.ui.progress
1901 _bundling = _('bundling')
1855 _bundling = _('bundling')
1902 _changesets = _('changesets')
1856 _changesets = _('changesets')
1903 _manifests = _('manifests')
1857 _manifests = _('manifests')
1904 _files = _('files')
1858 _files = _('files')
1905
1859
1906 def lookup(revlog, x):
1860 def lookup(revlog, x):
1907 if revlog == cl:
1861 if revlog == cl:
1908 c = cl.read(x)
1862 c = cl.read(x)
1909 changedfiles.update(c[3])
1863 changedfiles.update(c[3])
1910 mfs.setdefault(c[0], x)
1864 mfs.setdefault(c[0], x)
1911 count[0] += 1
1865 count[0] += 1
1912 progress(_bundling, count[0],
1866 progress(_bundling, count[0],
1913 unit=_changesets, total=count[1])
1867 unit=_changesets, total=count[1])
1914 return x
1868 return x
1915 elif revlog == mf:
1869 elif revlog == mf:
1916 clnode = mfs[x]
1870 clnode = mfs[x]
1917 mdata = mf.readfast(x)
1871 mdata = mf.readfast(x)
1918 for f, n in mdata.iteritems():
1872 for f, n in mdata.iteritems():
1919 if f in changedfiles:
1873 if f in changedfiles:
1920 fnodes[f].setdefault(n, clnode)
1874 fnodes[f].setdefault(n, clnode)
1921 count[0] += 1
1875 count[0] += 1
1922 progress(_bundling, count[0],
1876 progress(_bundling, count[0],
1923 unit=_manifests, total=count[1])
1877 unit=_manifests, total=count[1])
1924 return clnode
1878 return clnode
1925 else:
1879 else:
1926 progress(_bundling, count[0], item=fstate[0],
1880 progress(_bundling, count[0], item=fstate[0],
1927 unit=_files, total=count[1])
1881 unit=_files, total=count[1])
1928 return fstate[1][x]
1882 return fstate[1][x]
1929
1883
1930 bundler = changegroup.bundle10(lookup)
1884 bundler = changegroup.bundle10(lookup)
1931 reorder = self.ui.config('bundle', 'reorder', 'auto')
1885 reorder = self.ui.config('bundle', 'reorder', 'auto')
1932 if reorder == 'auto':
1886 if reorder == 'auto':
1933 reorder = None
1887 reorder = None
1934 else:
1888 else:
1935 reorder = util.parsebool(reorder)
1889 reorder = util.parsebool(reorder)
1936
1890
1937 def gengroup():
1891 def gengroup():
1938 # Create a changenode group generator that will call our functions
1892 # Create a changenode group generator that will call our functions
1939 # back to lookup the owning changenode and collect information.
1893 # back to lookup the owning changenode and collect information.
1940 count[:] = [0, len(csets)]
1894 count[:] = [0, len(csets)]
1941 for chunk in cl.group(csets, bundler, reorder=reorder):
1895 for chunk in cl.group(csets, bundler, reorder=reorder):
1942 yield chunk
1896 yield chunk
1943 progress(_bundling, None)
1897 progress(_bundling, None)
1944
1898
1945 # Create a generator for the manifestnodes that calls our lookup
1899 # Create a generator for the manifestnodes that calls our lookup
1946 # and data collection functions back.
1900 # and data collection functions back.
1947 for f in changedfiles:
1901 for f in changedfiles:
1948 fnodes[f] = {}
1902 fnodes[f] = {}
1949 count[:] = [0, len(mfs)]
1903 count[:] = [0, len(mfs)]
1950 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1904 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1951 yield chunk
1905 yield chunk
1952 progress(_bundling, None)
1906 progress(_bundling, None)
1953
1907
1954 mfs.clear()
1908 mfs.clear()
1955
1909
1956 # Go through all our files in order sorted by name.
1910 # Go through all our files in order sorted by name.
1957 count[:] = [0, len(changedfiles)]
1911 count[:] = [0, len(changedfiles)]
1958 for fname in sorted(changedfiles):
1912 for fname in sorted(changedfiles):
1959 filerevlog = self.file(fname)
1913 filerevlog = self.file(fname)
1960 if not len(filerevlog):
1914 if not len(filerevlog):
1961 raise util.Abort(_("empty or missing revlog for %s")
1915 raise util.Abort(_("empty or missing revlog for %s")
1962 % fname)
1916 % fname)
1963 fstate[0] = fname
1917 fstate[0] = fname
1964 fstate[1] = fnodes.pop(fname, {})
1918 fstate[1] = fnodes.pop(fname, {})
1965
1919
1966 nodelist = prune(filerevlog, fstate[1])
1920 nodelist = prune(filerevlog, fstate[1])
1967 if nodelist:
1921 if nodelist:
1968 count[0] += 1
1922 count[0] += 1
1969 yield bundler.fileheader(fname)
1923 yield bundler.fileheader(fname)
1970 for chunk in filerevlog.group(nodelist, bundler, reorder):
1924 for chunk in filerevlog.group(nodelist, bundler, reorder):
1971 yield chunk
1925 yield chunk
1972
1926
1973 # Signal that no more groups are left.
1927 # Signal that no more groups are left.
1974 yield bundler.close()
1928 yield bundler.close()
1975 progress(_bundling, None)
1929 progress(_bundling, None)
1976
1930
1977 if csets:
1931 if csets:
1978 self.hook('outgoing', node=hex(csets[0]), source=source)
1932 self.hook('outgoing', node=hex(csets[0]), source=source)
1979
1933
1980 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1934 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1981
1935
1982 def changegroup(self, basenodes, source):
1936 def changegroup(self, basenodes, source):
1983 # to avoid a race we use changegroupsubset() (issue1320)
1937 # to avoid a race we use changegroupsubset() (issue1320)
1984 return self.changegroupsubset(basenodes, self.heads(), source)
1938 return self.changegroupsubset(basenodes, self.heads(), source)
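
The chunkbuffer returned by these methods exposes read(), as the docstrings above note; a minimal consumption sketch, assuming a node list basenodes and a writable file object fp (both placeholders):

    cg = repo.changegroup(basenodes, 'push')
    chunk = cg.read(32768)
    while chunk:
        fp.write(chunk)   # real callers normally go through changegroup.writebundle
        chunk = cg.read(32768)
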
1985
1939
1986 def _changegroup(self, nodes, source):
1940 def _changegroup(self, nodes, source):
1987 """Compute the changegroup of all nodes that we have that a recipient
1941 """Compute the changegroup of all nodes that we have that a recipient
1988 doesn't. Return a chunkbuffer object whose read() method will return
1942 doesn't. Return a chunkbuffer object whose read() method will return
1989 successive changegroup chunks.
1943 successive changegroup chunks.
1990
1944
1991 This is much easier than the previous function as we can assume that
1945 This is much easier than the previous function as we can assume that
1992 the recipient already has every changenode we aren't sending.
1946 the recipient already has every changenode we aren't sending.
1993
1947
1994 nodes is the set of nodes to send"""
1948 nodes is the set of nodes to send"""
1995
1949
1996 cl = self.changelog
1950 cl = self.changelog
1997 mf = self.manifest
1951 mf = self.manifest
1998 mfs = {}
1952 mfs = {}
1999 changedfiles = set()
1953 changedfiles = set()
2000 fstate = ['']
1954 fstate = ['']
2001 count = [0, 0]
1955 count = [0, 0]
2002
1956
2003 self.hook('preoutgoing', throw=True, source=source)
1957 self.hook('preoutgoing', throw=True, source=source)
2004 self.changegroupinfo(nodes, source)
1958 self.changegroupinfo(nodes, source)
2005
1959
2006 revset = set([cl.rev(n) for n in nodes])
1960 revset = set([cl.rev(n) for n in nodes])
2007
1961
2008 def gennodelst(log):
1962 def gennodelst(log):
2009 ln, llr = log.node, log.linkrev
1963 ln, llr = log.node, log.linkrev
2010 return [ln(r) for r in log if llr(r) in revset]
1964 return [ln(r) for r in log if llr(r) in revset]
2011
1965
2012 progress = self.ui.progress
1966 progress = self.ui.progress
2013 _bundling = _('bundling')
1967 _bundling = _('bundling')
2014 _changesets = _('changesets')
1968 _changesets = _('changesets')
2015 _manifests = _('manifests')
1969 _manifests = _('manifests')
2016 _files = _('files')
1970 _files = _('files')
2017
1971
2018 def lookup(revlog, x):
1972 def lookup(revlog, x):
2019 if revlog == cl:
1973 if revlog == cl:
2020 c = cl.read(x)
1974 c = cl.read(x)
2021 changedfiles.update(c[3])
1975 changedfiles.update(c[3])
2022 mfs.setdefault(c[0], x)
1976 mfs.setdefault(c[0], x)
2023 count[0] += 1
1977 count[0] += 1
2024 progress(_bundling, count[0],
1978 progress(_bundling, count[0],
2025 unit=_changesets, total=count[1])
1979 unit=_changesets, total=count[1])
2026 return x
1980 return x
2027 elif revlog == mf:
1981 elif revlog == mf:
2028 count[0] += 1
1982 count[0] += 1
2029 progress(_bundling, count[0],
1983 progress(_bundling, count[0],
2030 unit=_manifests, total=count[1])
1984 unit=_manifests, total=count[1])
2031 return cl.node(revlog.linkrev(revlog.rev(x)))
1985 return cl.node(revlog.linkrev(revlog.rev(x)))
2032 else:
1986 else:
2033 progress(_bundling, count[0], item=fstate[0],
1987 progress(_bundling, count[0], item=fstate[0],
2034 total=count[1], unit=_files)
1988 total=count[1], unit=_files)
2035 return cl.node(revlog.linkrev(revlog.rev(x)))
1989 return cl.node(revlog.linkrev(revlog.rev(x)))
2036
1990
2037 bundler = changegroup.bundle10(lookup)
1991 bundler = changegroup.bundle10(lookup)
2038 reorder = self.ui.config('bundle', 'reorder', 'auto')
1992 reorder = self.ui.config('bundle', 'reorder', 'auto')
2039 if reorder == 'auto':
1993 if reorder == 'auto':
2040 reorder = None
1994 reorder = None
2041 else:
1995 else:
2042 reorder = util.parsebool(reorder)
1996 reorder = util.parsebool(reorder)
2043
1997
2044 def gengroup():
1998 def gengroup():
2045 '''yield a sequence of changegroup chunks (strings)'''
1999 '''yield a sequence of changegroup chunks (strings)'''
2046 # construct a list of all changed files
2000 # construct a list of all changed files
2047
2001
2048 count[:] = [0, len(nodes)]
2002 count[:] = [0, len(nodes)]
2049 for chunk in cl.group(nodes, bundler, reorder=reorder):
2003 for chunk in cl.group(nodes, bundler, reorder=reorder):
2050 yield chunk
2004 yield chunk
2051 progress(_bundling, None)
2005 progress(_bundling, None)
2052
2006
2053 count[:] = [0, len(mfs)]
2007 count[:] = [0, len(mfs)]
2054 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2008 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2055 yield chunk
2009 yield chunk
2056 progress(_bundling, None)
2010 progress(_bundling, None)
2057
2011
2058 count[:] = [0, len(changedfiles)]
2012 count[:] = [0, len(changedfiles)]
2059 for fname in sorted(changedfiles):
2013 for fname in sorted(changedfiles):
2060 filerevlog = self.file(fname)
2014 filerevlog = self.file(fname)
2061 if not len(filerevlog):
2015 if not len(filerevlog):
2062 raise util.Abort(_("empty or missing revlog for %s")
2016 raise util.Abort(_("empty or missing revlog for %s")
2063 % fname)
2017 % fname)
2064 fstate[0] = fname
2018 fstate[0] = fname
2065 nodelist = gennodelst(filerevlog)
2019 nodelist = gennodelst(filerevlog)
2066 if nodelist:
2020 if nodelist:
2067 count[0] += 1
2021 count[0] += 1
2068 yield bundler.fileheader(fname)
2022 yield bundler.fileheader(fname)
2069 for chunk in filerevlog.group(nodelist, bundler, reorder):
2023 for chunk in filerevlog.group(nodelist, bundler, reorder):
2070 yield chunk
2024 yield chunk
2071 yield bundler.close()
2025 yield bundler.close()
2072 progress(_bundling, None)
2026 progress(_bundling, None)
2073
2027
2074 if nodes:
2028 if nodes:
2075 self.hook('outgoing', node=hex(nodes[0]), source=source)
2029 self.hook('outgoing', node=hex(nodes[0]), source=source)
2076
2030
2077 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2031 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2078
2032
2079 def addchangegroup(self, source, srctype, url, emptyok=False):
2033 def addchangegroup(self, source, srctype, url, emptyok=False):
2080 """Add the changegroup returned by source.read() to this repo.
2034 """Add the changegroup returned by source.read() to this repo.
2081 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2035 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2082 the URL of the repo where this changegroup is coming from.
2036 the URL of the repo where this changegroup is coming from.
2083
2037
2084 Return an integer summarizing the change to this repo:
2038 Return an integer summarizing the change to this repo:
2085 - nothing changed or no source: 0
2039 - nothing changed or no source: 0
2086 - more heads than before: 1+added heads (2..n)
2040 - more heads than before: 1+added heads (2..n)
2087 - fewer heads than before: -1-removed heads (-2..-n)
2041 - fewer heads than before: -1-removed heads (-2..-n)
2088 - number of heads stays the same: 1
2042 - number of heads stays the same: 1
2089 """
2043 """
2090 def csmap(x):
2044 def csmap(x):
2091 self.ui.debug("add changeset %s\n" % short(x))
2045 self.ui.debug("add changeset %s\n" % short(x))
2092 return len(cl)
2046 return len(cl)
2093
2047
2094 def revmap(x):
2048 def revmap(x):
2095 return cl.rev(x)
2049 return cl.rev(x)
2096
2050
2097 if not source:
2051 if not source:
2098 return 0
2052 return 0
2099
2053
2100 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2054 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2101
2055
2102 changesets = files = revisions = 0
2056 changesets = files = revisions = 0
2103 efiles = set()
2057 efiles = set()
2104
2058
2105 # write changelog data to temp files so concurrent readers will not see
2059 # write changelog data to temp files so concurrent readers will not see
2106 # inconsistent view
2060 # inconsistent view
2107 cl = self.changelog
2061 cl = self.changelog
2108 cl.delayupdate()
2062 cl.delayupdate()
2109 oldheads = cl.heads()
2063 oldheads = cl.heads()
2110
2064
2111 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2065 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2112 try:
2066 try:
2113 trp = weakref.proxy(tr)
2067 trp = weakref.proxy(tr)
2114 # pull off the changeset group
2068 # pull off the changeset group
2115 self.ui.status(_("adding changesets\n"))
2069 self.ui.status(_("adding changesets\n"))
2116 clstart = len(cl)
2070 clstart = len(cl)
2117 class prog(object):
2071 class prog(object):
2118 step = _('changesets')
2072 step = _('changesets')
2119 count = 1
2073 count = 1
2120 ui = self.ui
2074 ui = self.ui
2121 total = None
2075 total = None
2122 def __call__(self):
2076 def __call__(self):
2123 self.ui.progress(self.step, self.count, unit=_('chunks'),
2077 self.ui.progress(self.step, self.count, unit=_('chunks'),
2124 total=self.total)
2078 total=self.total)
2125 self.count += 1
2079 self.count += 1
2126 pr = prog()
2080 pr = prog()
2127 source.callback = pr
2081 source.callback = pr
2128
2082
2129 source.changelogheader()
2083 source.changelogheader()
2130 srccontent = cl.addgroup(source, csmap, trp)
2084 srccontent = cl.addgroup(source, csmap, trp)
2131 if not (srccontent or emptyok):
2085 if not (srccontent or emptyok):
2132 raise util.Abort(_("received changelog group is empty"))
2086 raise util.Abort(_("received changelog group is empty"))
2133 clend = len(cl)
2087 clend = len(cl)
2134 changesets = clend - clstart
2088 changesets = clend - clstart
2135 for c in xrange(clstart, clend):
2089 for c in xrange(clstart, clend):
2136 efiles.update(self[c].files())
2090 efiles.update(self[c].files())
2137 efiles = len(efiles)
2091 efiles = len(efiles)
2138 self.ui.progress(_('changesets'), None)
2092 self.ui.progress(_('changesets'), None)
2139
2093
2140 # pull off the manifest group
2094 # pull off the manifest group
2141 self.ui.status(_("adding manifests\n"))
2095 self.ui.status(_("adding manifests\n"))
2142 pr.step = _('manifests')
2096 pr.step = _('manifests')
2143 pr.count = 1
2097 pr.count = 1
2144 pr.total = changesets # manifests <= changesets
2098 pr.total = changesets # manifests <= changesets
2145 # no need to check for empty manifest group here:
2099 # no need to check for empty manifest group here:
2146 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2100 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2147 # no new manifest will be created and the manifest group will
2101 # no new manifest will be created and the manifest group will
2148 # be empty during the pull
2102 # be empty during the pull
2149 source.manifestheader()
2103 source.manifestheader()
2150 self.manifest.addgroup(source, revmap, trp)
2104 self.manifest.addgroup(source, revmap, trp)
2151 self.ui.progress(_('manifests'), None)
2105 self.ui.progress(_('manifests'), None)
2152
2106
2153 needfiles = {}
2107 needfiles = {}
2154 if self.ui.configbool('server', 'validate', default=False):
2108 if self.ui.configbool('server', 'validate', default=False):
2155 # validate incoming csets have their manifests
2109 # validate incoming csets have their manifests
2156 for cset in xrange(clstart, clend):
2110 for cset in xrange(clstart, clend):
2157 mfest = self.changelog.read(self.changelog.node(cset))[0]
2111 mfest = self.changelog.read(self.changelog.node(cset))[0]
2158 mfest = self.manifest.readdelta(mfest)
2112 mfest = self.manifest.readdelta(mfest)
2159 # store file nodes we must see
2113 # store file nodes we must see
2160 for f, n in mfest.iteritems():
2114 for f, n in mfest.iteritems():
2161 needfiles.setdefault(f, set()).add(n)
2115 needfiles.setdefault(f, set()).add(n)
2162
2116
2163 # process the files
2117 # process the files
2164 self.ui.status(_("adding file changes\n"))
2118 self.ui.status(_("adding file changes\n"))
2165 pr.step = _('files')
2119 pr.step = _('files')
2166 pr.count = 1
2120 pr.count = 1
2167 pr.total = efiles
2121 pr.total = efiles
2168 source.callback = None
2122 source.callback = None
2169
2123
2170 while True:
2124 while True:
2171 chunkdata = source.filelogheader()
2125 chunkdata = source.filelogheader()
2172 if not chunkdata:
2126 if not chunkdata:
2173 break
2127 break
2174 f = chunkdata["filename"]
2128 f = chunkdata["filename"]
2175 self.ui.debug("adding %s revisions\n" % f)
2129 self.ui.debug("adding %s revisions\n" % f)
2176 pr()
2130 pr()
2177 fl = self.file(f)
2131 fl = self.file(f)
2178 o = len(fl)
2132 o = len(fl)
2179 if not fl.addgroup(source, revmap, trp):
2133 if not fl.addgroup(source, revmap, trp):
2180 raise util.Abort(_("received file revlog group is empty"))
2134 raise util.Abort(_("received file revlog group is empty"))
2181 revisions += len(fl) - o
2135 revisions += len(fl) - o
2182 files += 1
2136 files += 1
2183 if f in needfiles:
2137 if f in needfiles:
2184 needs = needfiles[f]
2138 needs = needfiles[f]
2185 for new in xrange(o, len(fl)):
2139 for new in xrange(o, len(fl)):
2186 n = fl.node(new)
2140 n = fl.node(new)
2187 if n in needs:
2141 if n in needs:
2188 needs.remove(n)
2142 needs.remove(n)
2189 if not needs:
2143 if not needs:
2190 del needfiles[f]
2144 del needfiles[f]
2191 self.ui.progress(_('files'), None)
2145 self.ui.progress(_('files'), None)
2192
2146
2193 for f, needs in needfiles.iteritems():
2147 for f, needs in needfiles.iteritems():
2194 fl = self.file(f)
2148 fl = self.file(f)
2195 for n in needs:
2149 for n in needs:
2196 try:
2150 try:
2197 fl.rev(n)
2151 fl.rev(n)
2198 except error.LookupError:
2152 except error.LookupError:
2199 raise util.Abort(
2153 raise util.Abort(
2200 _('missing file data for %s:%s - run hg verify') %
2154 _('missing file data for %s:%s - run hg verify') %
2201 (f, hex(n)))
2155 (f, hex(n)))
2202
2156
2203 dh = 0
2157 dh = 0
2204 if oldheads:
2158 if oldheads:
2205 heads = cl.heads()
2159 heads = cl.heads()
2206 dh = len(heads) - len(oldheads)
2160 dh = len(heads) - len(oldheads)
2207 for h in heads:
2161 for h in heads:
2208 if h not in oldheads and self[h].closesbranch():
2162 if h not in oldheads and self[h].closesbranch():
2209 dh -= 1
2163 dh -= 1
2210 htext = ""
2164 htext = ""
2211 if dh:
2165 if dh:
2212 htext = _(" (%+d heads)") % dh
2166 htext = _(" (%+d heads)") % dh
2213
2167
2214 self.ui.status(_("added %d changesets"
2168 self.ui.status(_("added %d changesets"
2215 " with %d changes to %d files%s\n")
2169 " with %d changes to %d files%s\n")
2216 % (changesets, revisions, files, htext))
2170 % (changesets, revisions, files, htext))
2217
2171
2218 if changesets > 0:
2172 if changesets > 0:
2219 p = lambda: cl.writepending() and self.root or ""
2173 p = lambda: cl.writepending() and self.root or ""
2220 self.hook('pretxnchangegroup', throw=True,
2174 self.hook('pretxnchangegroup', throw=True,
2221 node=hex(cl.node(clstart)), source=srctype,
2175 node=hex(cl.node(clstart)), source=srctype,
2222 url=url, pending=p)
2176 url=url, pending=p)
2223
2177
2224 added = [cl.node(r) for r in xrange(clstart, clend)]
2178 added = [cl.node(r) for r in xrange(clstart, clend)]
2225 publishing = self.ui.configbool('phases', 'publish', True)
2179 publishing = self.ui.configbool('phases', 'publish', True)
2226 if srctype == 'push':
2180 if srctype == 'push':
2227 # Old servers cannot push the boundary themselves.
2181 # Old servers cannot push the boundary themselves.
2228 # New servers won't push the boundary if the changeset already
2182 # New servers won't push the boundary if the changeset already
2229 # existed locally as secret
2183 # existed locally as secret
2230 #
2184 #
2231 # We should not use 'added' here but the list of all changes in
2185 # We should not use 'added' here but the list of all changes in
2232 # the bundle
2186 # the bundle
2233 if publishing:
2187 if publishing:
2234 phases.advanceboundary(self, phases.public, srccontent)
2188 phases.advanceboundary(self, phases.public, srccontent)
2235 else:
2189 else:
2236 phases.advanceboundary(self, phases.draft, srccontent)
2190 phases.advanceboundary(self, phases.draft, srccontent)
2237 phases.retractboundary(self, phases.draft, added)
2191 phases.retractboundary(self, phases.draft, added)
2238 elif srctype != 'strip':
2192 elif srctype != 'strip':
2239 # publishing only alters behavior during push
2193 # publishing only alters behavior during push
2240 #
2194 #
2241 # strip should not touch boundary at all
2195 # strip should not touch boundary at all
2242 phases.retractboundary(self, phases.draft, added)
2196 phases.retractboundary(self, phases.draft, added)
2243
2197
2244 # make changelog see real files again
2198 # make changelog see real files again
2245 cl.finalize(trp)
2199 cl.finalize(trp)
2246
2200
2247 tr.close()
2201 tr.close()
2248
2202
2249 if changesets > 0:
2203 if changesets > 0:
2250 def runhooks():
2204 def runhooks():
2251 # forcefully update the on-disk branch cache
2205 # forcefully update the on-disk branch cache
2252 self.ui.debug("updating the branch cache\n")
2206 self.ui.debug("updating the branch cache\n")
2253 self.updatebranchcache()
2207 self.updatebranchcache()
2254 self.hook("changegroup", node=hex(cl.node(clstart)),
2208 self.hook("changegroup", node=hex(cl.node(clstart)),
2255 source=srctype, url=url)
2209 source=srctype, url=url)
2256
2210
2257 for n in added:
2211 for n in added:
2258 self.hook("incoming", node=hex(n), source=srctype,
2212 self.hook("incoming", node=hex(n), source=srctype,
2259 url=url)
2213 url=url)
2260 self._afterlock(runhooks)
2214 self._afterlock(runhooks)
2261
2215
2262 finally:
2216 finally:
2263 tr.release()
2217 tr.release()
2264 # never return 0 here:
2218 # never return 0 here:
2265 if dh < 0:
2219 if dh < 0:
2266 return dh - 1
2220 return dh - 1
2267 else:
2221 else:
2268 return dh + 1
2222 return dh + 1
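
A worked illustration of the return-value convention documented in addchangegroup's docstring (the head counts are made up):

    def summarize_heads(oldheads, newheads, closed_new_heads=0):
        # mirrors the dh bookkeeping above, ignoring the "no source" case
        dh = newheads - oldheads - closed_new_heads
        return dh - 1 if dh < 0 else dh + 1

    summarize_heads(2, 4)    # -> 3   (two heads added)
    summarize_heads(3, 2)    # -> -2  (one head removed)
    summarize_heads(2, 2)    # -> 1   (head count unchanged, never 0)
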
2269
2223
2270 def stream_in(self, remote, requirements):
2224 def stream_in(self, remote, requirements):
2271 lock = self.lock()
2225 lock = self.lock()
2272 try:
2226 try:
2273 fp = remote.stream_out()
2227 fp = remote.stream_out()
2274 l = fp.readline()
2228 l = fp.readline()
2275 try:
2229 try:
2276 resp = int(l)
2230 resp = int(l)
2277 except ValueError:
2231 except ValueError:
2278 raise error.ResponseError(
2232 raise error.ResponseError(
2279 _('Unexpected response from remote server:'), l)
2233 _('Unexpected response from remote server:'), l)
2280 if resp == 1:
2234 if resp == 1:
2281 raise util.Abort(_('operation forbidden by server'))
2235 raise util.Abort(_('operation forbidden by server'))
2282 elif resp == 2:
2236 elif resp == 2:
2283 raise util.Abort(_('locking the remote repository failed'))
2237 raise util.Abort(_('locking the remote repository failed'))
2284 elif resp != 0:
2238 elif resp != 0:
2285 raise util.Abort(_('the server sent an unknown error code'))
2239 raise util.Abort(_('the server sent an unknown error code'))
2286 self.ui.status(_('streaming all changes\n'))
2240 self.ui.status(_('streaming all changes\n'))
2287 l = fp.readline()
2241 l = fp.readline()
2288 try:
2242 try:
2289 total_files, total_bytes = map(int, l.split(' ', 1))
2243 total_files, total_bytes = map(int, l.split(' ', 1))
2290 except (ValueError, TypeError):
2244 except (ValueError, TypeError):
2291 raise error.ResponseError(
2245 raise error.ResponseError(
2292 _('Unexpected response from remote server:'), l)
2246 _('Unexpected response from remote server:'), l)
2293 self.ui.status(_('%d files to transfer, %s of data\n') %
2247 self.ui.status(_('%d files to transfer, %s of data\n') %
2294 (total_files, util.bytecount(total_bytes)))
2248 (total_files, util.bytecount(total_bytes)))
2295 start = time.time()
2249 start = time.time()
2296 for i in xrange(total_files):
2250 for i in xrange(total_files):
2297 # XXX doesn't support '\n' or '\r' in filenames
2251 # XXX doesn't support '\n' or '\r' in filenames
2298 l = fp.readline()
2252 l = fp.readline()
2299 try:
2253 try:
2300 name, size = l.split('\0', 1)
2254 name, size = l.split('\0', 1)
2301 size = int(size)
2255 size = int(size)
2302 except (ValueError, TypeError):
2256 except (ValueError, TypeError):
2303 raise error.ResponseError(
2257 raise error.ResponseError(
2304 _('Unexpected response from remote server:'), l)
2258 _('Unexpected response from remote server:'), l)
2305 if self.ui.debugflag:
2259 if self.ui.debugflag:
2306 self.ui.debug('adding %s (%s)\n' %
2260 self.ui.debug('adding %s (%s)\n' %
2307 (name, util.bytecount(size)))
2261 (name, util.bytecount(size)))
2308 # for backwards compat, name was partially encoded
2262 # for backwards compat, name was partially encoded
2309 ofp = self.sopener(store.decodedir(name), 'w')
2263 ofp = self.sopener(store.decodedir(name), 'w')
2310 for chunk in util.filechunkiter(fp, limit=size):
2264 for chunk in util.filechunkiter(fp, limit=size):
2311 ofp.write(chunk)
2265 ofp.write(chunk)
2312 ofp.close()
2266 ofp.close()
2313 elapsed = time.time() - start
2267 elapsed = time.time() - start
2314 if elapsed <= 0:
2268 if elapsed <= 0:
2315 elapsed = 0.001
2269 elapsed = 0.001
2316 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2270 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2317 (util.bytecount(total_bytes), elapsed,
2271 (util.bytecount(total_bytes), elapsed,
2318 util.bytecount(total_bytes / elapsed)))
2272 util.bytecount(total_bytes / elapsed)))
2319
2273
2320 # new requirements = old non-format requirements +
2274 # new requirements = old non-format requirements +
2321 # new format-related
2275 # new format-related
2322 # requirements from the streamed-in repository
2276 # requirements from the streamed-in repository
2323 requirements.update(set(self.requirements) - self.supportedformats)
2277 requirements.update(set(self.requirements) - self.supportedformats)
2324 self._applyrequirements(requirements)
2278 self._applyrequirements(requirements)
2325 self._writerequirements()
2279 self._writerequirements()
2326
2280
2327 self.invalidate()
2281 self.invalidate()
2328 return len(self.heads()) + 1
2282 return len(self.heads()) + 1
2329 finally:
2283 finally:
2330 lock.release()
2284 lock.release()
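
For reference, a hedged sketch of the stream_out wire format that stream_in parses above; the helper below only reads the two header lines, and every name in it is an assumption for illustration:

    # status line:  '0' ok, '1' operation forbidden, '2' remote locking failed
    # summary line: '<total_files> <total_bytes>\n'
    # per file:     '<store path>\0<size>\n' followed by exactly <size> bytes
    def read_stream_header(fp):
        resp = int(fp.readline())
        if resp != 0:
            raise ValueError('unexpected status %d' % resp)
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        return total_files, total_bytes
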
2331
2285
2332 def clone(self, remote, heads=[], stream=False):
2286 def clone(self, remote, heads=[], stream=False):
2333 '''clone remote repository.
2287 '''clone remote repository.
2334
2288
2335 keyword arguments:
2289 keyword arguments:
2336 heads: list of revs to clone (forces use of pull)
2290 heads: list of revs to clone (forces use of pull)
2337 stream: use streaming clone if possible'''
2291 stream: use streaming clone if possible'''
2338
2292
2339 # now, all clients that can request uncompressed clones can
2293 # now, all clients that can request uncompressed clones can
2340 # read repo formats supported by all servers that can serve
2294 # read repo formats supported by all servers that can serve
2341 # them.
2295 # them.
2342
2296
2343 # if revlog format changes, client will have to check version
2297 # if revlog format changes, client will have to check version
2344 # and format flags on "stream" capability, and use
2298 # and format flags on "stream" capability, and use
2345 # uncompressed only if compatible.
2299 # uncompressed only if compatible.
2346
2300
2347 if not stream:
2301 if not stream:
2348 # if the server explicitly prefers to stream (for fast LANs)
2302 # if the server explicitly prefers to stream (for fast LANs)
2349 stream = remote.capable('stream-preferred')
2303 stream = remote.capable('stream-preferred')
2350
2304
2351 if stream and not heads:
2305 if stream and not heads:
2352 # 'stream' means remote revlog format is revlogv1 only
2306 # 'stream' means remote revlog format is revlogv1 only
2353 if remote.capable('stream'):
2307 if remote.capable('stream'):
2354 return self.stream_in(remote, set(('revlogv1',)))
2308 return self.stream_in(remote, set(('revlogv1',)))
2355 # otherwise, 'streamreqs' contains the remote revlog format
2309 # otherwise, 'streamreqs' contains the remote revlog format
2356 streamreqs = remote.capable('streamreqs')
2310 streamreqs = remote.capable('streamreqs')
2357 if streamreqs:
2311 if streamreqs:
2358 streamreqs = set(streamreqs.split(','))
2312 streamreqs = set(streamreqs.split(','))
2359 # if we support it, stream in and adjust our requirements
2313 # if we support it, stream in and adjust our requirements
2360 if not streamreqs - self.supportedformats:
2314 if not streamreqs - self.supportedformats:
2361 return self.stream_in(remote, streamreqs)
2315 return self.stream_in(remote, streamreqs)
2362 return self.pull(remote, heads)
2316 return self.pull(remote, heads)
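
Condensed into one helper, the streaming decision above reads roughly as follows (a simplified sketch rather than the actual method; capability names are taken from the code):

    def stream_requirements(remote, heads, stream, supportedformats):
        if not stream:
            stream = remote.capable('stream-preferred')
        if not stream or heads:
            return None                      # fall back to a regular pull
        if remote.capable('stream'):
            return set(['revlogv1'])         # bare 'stream' means revlogv1 only
        streamreqs = remote.capable('streamreqs')
        if streamreqs:
            reqs = set(streamreqs.split(','))
            if not reqs - supportedformats:  # every remote format is supported
                return reqs
        return None
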
2363
2317
2364 def pushkey(self, namespace, key, old, new):
2318 def pushkey(self, namespace, key, old, new):
2365 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2319 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2366 old=old, new=new)
2320 old=old, new=new)
2367 ret = pushkey.push(self, namespace, key, old, new)
2321 ret = pushkey.push(self, namespace, key, old, new)
2368 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2322 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2369 ret=ret)
2323 ret=ret)
2370 return ret
2324 return ret
2371
2325
2372 def listkeys(self, namespace):
2326 def listkeys(self, namespace):
2373 self.hook('prelistkeys', throw=True, namespace=namespace)
2327 self.hook('prelistkeys', throw=True, namespace=namespace)
2374 values = pushkey.list(self, namespace)
2328 values = pushkey.list(self, namespace)
2375 self.hook('listkeys', namespace=namespace, values=values)
2329 self.hook('listkeys', namespace=namespace, values=values)
2376 return values
2330 return values
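
A small, hypothetical pushkey round trip through the two wrappers above (the bookmark name and hashes are placeholders):

    books = repo.listkeys('bookmarks')            # e.g. {'featureX': '<hex node>'}
    ok = repo.pushkey('bookmarks', 'featureX',
                      books.get('featureX', ''),  # expected current value, '' to create
                      newhex)                     # new hex node, placeholder
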
2377
2331
2378 def debugwireargs(self, one, two, three=None, four=None, five=None):
2332 def debugwireargs(self, one, two, three=None, four=None, five=None):
2379 '''used to test argument passing over the wire'''
2333 '''used to test argument passing over the wire'''
2380 return "%s %s %s %s %s" % (one, two, three, four, five)
2334 return "%s %s %s %s %s" % (one, two, three, four, five)
2381
2335
2382 def savecommitmessage(self, text):
2336 def savecommitmessage(self, text):
2383 fp = self.opener('last-message.txt', 'wb')
2337 fp = self.opener('last-message.txt', 'wb')
2384 try:
2338 try:
2385 fp.write(text)
2339 fp.write(text)
2386 finally:
2340 finally:
2387 fp.close()
2341 fp.close()
2388 return self.pathto(fp.name[len(self.root)+1:])
2342 return self.pathto(fp.name[len(self.root)+1:])
2389
2343
2390 # used to avoid circular references so destructors work
2344 # used to avoid circular references so destructors work
2391 def aftertrans(files):
2345 def aftertrans(files):
2392 renamefiles = [tuple(t) for t in files]
2346 renamefiles = [tuple(t) for t in files]
2393 def a():
2347 def a():
2394 for src, dest in renamefiles:
2348 for src, dest in renamefiles:
2395 try:
2349 try:
2396 util.rename(src, dest)
2350 util.rename(src, dest)
2397 except OSError: # journal file does not yet exist
2351 except OSError: # journal file does not yet exist
2398 pass
2352 pass
2399 return a
2353 return a
2400
2354
2401 def undoname(fn):
2355 def undoname(fn):
2402 base, name = os.path.split(fn)
2356 base, name = os.path.split(fn)
2403 assert name.startswith('journal')
2357 assert name.startswith('journal')
2404 return os.path.join(base, name.replace('journal', 'undo', 1))
2358 return os.path.join(base, name.replace('journal', 'undo', 1))
2405
2359
2406 def instance(ui, path, create):
2360 def instance(ui, path, create):
2407 return localrepository(ui, util.urllocalpath(path), create)
2361 return localrepository(ui, util.urllocalpath(path), create)
2408
2362
2409 def islocal(path):
2363 def islocal(path):
2410 return True
2364 return True
@@ -1,185 +1,172 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from mercurial import changegroup, bookmarks
9 from mercurial import changegroup, bookmarks
10 from mercurial.node import short
10 from mercurial.node import short
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12 import os
12 import os
13 import errno
13 import errno
14
14
15 def _bundle(repo, bases, heads, node, suffix, compress=True):
15 def _bundle(repo, bases, heads, node, suffix, compress=True):
16 """create a bundle with the specified revisions as a backup"""
16 """create a bundle with the specified revisions as a backup"""
17 cg = repo.changegroupsubset(bases, heads, 'strip')
17 cg = repo.changegroupsubset(bases, heads, 'strip')
18 backupdir = repo.join("strip-backup")
18 backupdir = repo.join("strip-backup")
19 if not os.path.isdir(backupdir):
19 if not os.path.isdir(backupdir):
20 os.mkdir(backupdir)
20 os.mkdir(backupdir)
21 name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix))
21 name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix))
22 if compress:
22 if compress:
23 bundletype = "HG10BZ"
23 bundletype = "HG10BZ"
24 else:
24 else:
25 bundletype = "HG10UN"
25 bundletype = "HG10UN"
26 return changegroup.writebundle(cg, name, bundletype)
26 return changegroup.writebundle(cg, name, bundletype)
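
For orientation, a hedged example of what a call to _bundle produces (the node, bases and suffix are placeholders):

    # writes .hg/strip-backup/<short(node)>-backup.hg, HG10BZ when compress=True
    backupfile = _bundle(repo, stripbases, repo.heads(), node, 'backup')
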
27
27
28 def _collectfiles(repo, striprev):
28 def _collectfiles(repo, striprev):
29 """find out the filelogs affected by the strip"""
29 """find out the filelogs affected by the strip"""
30 files = set()
30 files = set()
31
31
32 for x in xrange(striprev, len(repo)):
32 for x in xrange(striprev, len(repo)):
33 files.update(repo[x].files())
33 files.update(repo[x].files())
34
34
35 return sorted(files)
35 return sorted(files)
36
36
37 def _collectbrokencsets(repo, files, striprev):
37 def _collectbrokencsets(repo, files, striprev):
38 """return the changesets which will be broken by the truncation"""
38 """return the changesets which will be broken by the truncation"""
39 s = set()
39 s = set()
40 def collectone(revlog):
40 def collectone(revlog):
41 linkgen = (revlog.linkrev(i) for i in revlog)
41 linkgen = (revlog.linkrev(i) for i in revlog)
42 # find the truncation point of the revlog
42 # find the truncation point of the revlog
43 for lrev in linkgen:
43 for lrev in linkgen:
44 if lrev >= striprev:
44 if lrev >= striprev:
45 break
45 break
46 # see if any revision after this point has a linkrev
46 # see if any revision after this point has a linkrev
47 # less than striprev (those will be broken by strip)
47 # less than striprev (those will be broken by strip)
48 for lrev in linkgen:
48 for lrev in linkgen:
49 if lrev < striprev:
49 if lrev < striprev:
50 s.add(lrev)
50 s.add(lrev)
51
51
52 collectone(repo.manifest)
52 collectone(repo.manifest)
53 for fname in files:
53 for fname in files:
54 collectone(repo.file(fname))
54 collectone(repo.file(fname))
55
55
56 return s
56 return s
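
A toy rendering of collectone's linkrev scan above (the list of linkrevs is invented):

    def broken_linkrevs(linkrevs, striprev):
        it = iter(linkrevs)
        for lrev in it:                 # skip to the truncation point
            if lrev >= striprev:
                break
        # anything after that point linking below striprev would be broken
        return set(lrev for lrev in it if lrev < striprev)

    broken_linkrevs([0, 1, 5, 2, 6], striprev=4)    # -> set([2])
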
57
57
58 def strip(ui, repo, nodelist, backup="all", topic='backup'):
58 def strip(ui, repo, nodelist, backup="all", topic='backup'):
59 # It simplifies the logic around updating the branchheads cache if we only
60 # have to consider the effect of the stripped revisions and not revisions
61 # missing because the cache is out-of-date.
62 repo.updatebranchcache()
63
64 cl = repo.changelog
59 cl = repo.changelog
65 # TODO handle undo of merge sets
60 # TODO handle undo of merge sets
66 if isinstance(nodelist, str):
61 if isinstance(nodelist, str):
67 nodelist = [nodelist]
62 nodelist = [nodelist]
68 striplist = [cl.rev(node) for node in nodelist]
63 striplist = [cl.rev(node) for node in nodelist]
69 striprev = min(striplist)
64 striprev = min(striplist)
70
65
71 # Set of potential new heads resulting from the strip. The parents of any
72 # node removed could be a new head because the node to be removed could have
73 # been the only child of the parent.
74 # Do a list->set->list conversion to remove duplicates.
75 stringstriplist = [str(rev) for rev in striplist]
76 newheadrevs = set(repo.revs("parents(%lr::) - %lr::", stringstriplist,
77 stringstriplist))
78
79 keeppartialbundle = backup == 'strip'
66 keeppartialbundle = backup == 'strip'
80
67
81 # Some revisions with rev > striprev may not be descendants of striprev.
68 # Some revisions with rev > striprev may not be descendants of striprev.
82 # We have to find these revisions and put them in a bundle, so that
69 # We have to find these revisions and put them in a bundle, so that
83 # we can restore them after the truncations.
70 # we can restore them after the truncations.
84 # To create the bundle we use repo.changegroupsubset which requires
71 # To create the bundle we use repo.changegroupsubset which requires
85 # the list of heads and bases of the set of interesting revisions.
72 # the list of heads and bases of the set of interesting revisions.
86 # (head = revision in the set that has no descendant in the set;
73 # (head = revision in the set that has no descendant in the set;
87 # base = revision in the set that has no ancestor in the set)
74 # base = revision in the set that has no ancestor in the set)
88 tostrip = set(striplist)
75 tostrip = set(striplist)
89 for rev in striplist:
76 for rev in striplist:
90 for desc in cl.descendants(rev):
77 for desc in cl.descendants(rev):
91 tostrip.add(desc)
78 tostrip.add(desc)
92
79
93 files = _collectfiles(repo, striprev)
80 files = _collectfiles(repo, striprev)
94 saverevs = _collectbrokencsets(repo, files, striprev)
81 saverevs = _collectbrokencsets(repo, files, striprev)
95
82
96 # compute heads
83 # compute heads
97 saveheads = set(saverevs)
84 saveheads = set(saverevs)
98 for r in xrange(striprev + 1, len(cl)):
85 for r in xrange(striprev + 1, len(cl)):
99 if r not in tostrip:
86 if r not in tostrip:
100 saverevs.add(r)
87 saverevs.add(r)
101 saveheads.difference_update(cl.parentrevs(r))
88 saveheads.difference_update(cl.parentrevs(r))
102 saveheads.add(r)
89 saveheads.add(r)
103 saveheads = [cl.node(r) for r in saveheads]
90 saveheads = [cl.node(r) for r in saveheads]
104
91
105 # compute base nodes
92 # compute base nodes
106 if saverevs:
93 if saverevs:
107 descendants = set(cl.descendants(*saverevs))
94 descendants = set(cl.descendants(*saverevs))
108 saverevs.difference_update(descendants)
95 saverevs.difference_update(descendants)
109 savebases = [cl.node(r) for r in saverevs]
96 savebases = [cl.node(r) for r in saverevs]
110 stripbases = [cl.node(r) for r in tostrip]
97 stripbases = [cl.node(r) for r in tostrip]
111
98
112 bm = repo._bookmarks
99 bm = repo._bookmarks
113 updatebm = []
100 updatebm = []
114 for m in bm:
101 for m in bm:
115 rev = repo[bm[m]].rev()
102 rev = repo[bm[m]].rev()
116 if rev in tostrip:
103 if rev in tostrip:
117 updatebm.append(m)
104 updatebm.append(m)
118
105
119 # create a changegroup for all the branches we need to keep
106 # create a changegroup for all the branches we need to keep
120 backupfile = None
107 backupfile = None
121 if backup == "all":
108 if backup == "all":
122 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
109 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
123 repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
110 repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
124 if saveheads or savebases:
111 if saveheads or savebases:
125 # do not compress partial bundle if we remove it from disk later
112 # do not compress partial bundle if we remove it from disk later
126 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
113 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
127 compress=keeppartialbundle)
114 compress=keeppartialbundle)
128
115
129 mfst = repo.manifest
116 mfst = repo.manifest
130
117
131 tr = repo.transaction("strip")
118 tr = repo.transaction("strip")
132 offset = len(tr.entries)
119 offset = len(tr.entries)
133
120
134 try:
121 try:
135 tr.startgroup()
122 tr.startgroup()
136 cl.strip(striprev, tr)
123 cl.strip(striprev, tr)
137 mfst.strip(striprev, tr)
124 mfst.strip(striprev, tr)
138 for fn in files:
125 for fn in files:
139 repo.file(fn).strip(striprev, tr)
126 repo.file(fn).strip(striprev, tr)
140 tr.endgroup()
127 tr.endgroup()
141
128
142 try:
129 try:
143 for i in xrange(offset, len(tr.entries)):
130 for i in xrange(offset, len(tr.entries)):
144 file, troffset, ignore = tr.entries[i]
131 file, troffset, ignore = tr.entries[i]
145 repo.sopener(file, 'a').truncate(troffset)
132 repo.sopener(file, 'a').truncate(troffset)
146 tr.close()
133 tr.close()
147 except: # re-raises
134 except: # re-raises
148 tr.abort()
135 tr.abort()
149 raise
136 raise
150
137
151 if saveheads or savebases:
138 if saveheads or savebases:
152 ui.note(_("adding branch\n"))
139 ui.note(_("adding branch\n"))
153 f = open(chgrpfile, "rb")
140 f = open(chgrpfile, "rb")
154 gen = changegroup.readbundle(f, chgrpfile)
141 gen = changegroup.readbundle(f, chgrpfile)
155 if not repo.ui.verbose:
142 if not repo.ui.verbose:
156 # silence internal shuffling chatter
143 # silence internal shuffling chatter
157 repo.ui.pushbuffer()
144 repo.ui.pushbuffer()
158 repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
145 repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
159 if not repo.ui.verbose:
146 if not repo.ui.verbose:
160 repo.ui.popbuffer()
147 repo.ui.popbuffer()
161 f.close()
148 f.close()
162 if not keeppartialbundle:
149 if not keeppartialbundle:
163 os.unlink(chgrpfile)
150 os.unlink(chgrpfile)
164
151
165 # remove undo files
152 # remove undo files
166 for undofile in repo.undofiles():
153 for undofile in repo.undofiles():
167 try:
154 try:
168 os.unlink(undofile)
155 os.unlink(undofile)
169 except OSError, e:
156 except OSError, e:
170 if e.errno != errno.ENOENT:
157 if e.errno != errno.ENOENT:
171 ui.warn(_('error removing %s: %s\n') % (undofile, str(e)))
158 ui.warn(_('error removing %s: %s\n') % (undofile, str(e)))
172
159
173 for m in updatebm:
160 for m in updatebm:
174 bm[m] = repo['.'].node()
161 bm[m] = repo['.'].node()
175 bookmarks.write(repo)
162 bookmarks.write(repo)
176 except: # re-raises
163 except: # re-raises
177 if backupfile:
164 if backupfile:
178 ui.warn(_("strip failed, full bundle stored in '%s'\n")
165 ui.warn(_("strip failed, full bundle stored in '%s'\n")
179 % backupfile)
166 % backupfile)
180 elif saveheads:
167 elif saveheads:
181 ui.warn(_("strip failed, partial bundle stored in '%s'\n")
168 ui.warn(_("strip failed, partial bundle stored in '%s'\n")
182 % chgrpfile)
169 % chgrpfile)
183 raise
170 raise
184
171
185 repo.destroyed(newheadrevs)
172 repo.destroyed()
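
To make the saveheads/savebases computation in strip() concrete, a toy example (revision numbers are invented, and it assumes no changesets are broken by filelog truncation):

    # DAG: 0 - 1 - 2 - 3 plus a branch 1 - 4; strip the node for rev 2.
    # striprev  = 2
    # tostrip   = {2, 3}        rev 2 and all of its descendants
    # saverevs  = {4}           revs above striprev that do not descend from it
    # saveheads = [node(4)]     kept revisions with no kept descendant
    # savebases = [node(4)]     kept revisions with no kept ancestor in the set
    # rev 4 is bundled before the truncation and re-added afterwards
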
@@ -1,353 +1,352 b''
1 $ "$TESTDIR/hghave" execbit || exit 80
1 $ "$TESTDIR/hghave" execbit || exit 80
2
2
3 $ hg init
3 $ hg init
4
4
5 Setup:
5 Setup:
6
6
7 $ echo a >> a
7 $ echo a >> a
8 $ hg ci -Am 'base'
8 $ hg ci -Am 'base'
9 adding a
9 adding a
10
10
11 Refuse to amend public csets:
11 Refuse to amend public csets:
12
12
13 $ hg phase -r . -p
13 $ hg phase -r . -p
14 $ hg ci --amend
14 $ hg ci --amend
15 abort: cannot amend public changesets
15 abort: cannot amend public changesets
16 [255]
16 [255]
17 $ hg phase -r . -f -d
17 $ hg phase -r . -f -d
18
18
19 $ echo a >> a
19 $ echo a >> a
20 $ hg ci -Am 'base1'
20 $ hg ci -Am 'base1'
21
21
22 Nothing to amend:
22 Nothing to amend:
23
23
24 $ hg ci --amend
24 $ hg ci --amend
25 nothing changed
25 nothing changed
26 [1]
26 [1]
27
27
28 Amending changeset with changes in working dir:
28 Amending changeset with changes in working dir:
29
29
30 $ echo a >> a
30 $ echo a >> a
31 $ hg ci --amend -m 'amend base1'
31 $ hg ci --amend -m 'amend base1'
32 saved backup bundle to $TESTTMP/.hg/strip-backup/489edb5b847d-amend-backup.hg
32 saved backup bundle to $TESTTMP/.hg/strip-backup/489edb5b847d-amend-backup.hg
33 $ hg diff -c .
33 $ hg diff -c .
34 diff -r ad120869acf0 -r 9cd25b479c51 a
34 diff -r ad120869acf0 -r 9cd25b479c51 a
35 --- a/a Thu Jan 01 00:00:00 1970 +0000
35 --- a/a Thu Jan 01 00:00:00 1970 +0000
36 +++ b/a Thu Jan 01 00:00:00 1970 +0000
36 +++ b/a Thu Jan 01 00:00:00 1970 +0000
37 @@ -1,1 +1,3 @@
37 @@ -1,1 +1,3 @@
38 a
38 a
39 +a
39 +a
40 +a
40 +a
41 $ hg log
41 $ hg log
42 changeset: 1:9cd25b479c51
42 changeset: 1:9cd25b479c51
43 tag: tip
43 tag: tip
44 user: test
44 user: test
45 date: Thu Jan 01 00:00:00 1970 +0000
45 date: Thu Jan 01 00:00:00 1970 +0000
46 summary: amend base1
46 summary: amend base1
47
47
48 changeset: 0:ad120869acf0
48 changeset: 0:ad120869acf0
49 user: test
49 user: test
50 date: Thu Jan 01 00:00:00 1970 +0000
50 date: Thu Jan 01 00:00:00 1970 +0000
51 summary: base
51 summary: base
52
52
53
53
54 Add new file:
54 Add new file:
55
55
56 $ echo b > b
56 $ echo b > b
57 $ hg ci --amend -Am 'amend base1 new file'
57 $ hg ci --amend -Am 'amend base1 new file'
58 adding b
58 adding b
59 saved backup bundle to $TESTTMP/.hg/strip-backup/9cd25b479c51-amend-backup.hg
59 saved backup bundle to $TESTTMP/.hg/strip-backup/9cd25b479c51-amend-backup.hg
60
60
61 Remove file that was added in amended commit:
61 Remove file that was added in amended commit:
62
62
63 $ hg rm b
63 $ hg rm b
64 $ hg ci --amend -m 'amend base1 remove new file'
64 $ hg ci --amend -m 'amend base1 remove new file'
65 saved backup bundle to $TESTTMP/.hg/strip-backup/e2bb3ecffd2f-amend-backup.hg
65 saved backup bundle to $TESTTMP/.hg/strip-backup/e2bb3ecffd2f-amend-backup.hg
66
66
67 $ hg cat b
67 $ hg cat b
68 b: no such file in rev 664a9b2d60cd
68 b: no such file in rev 664a9b2d60cd
69 [1]
69 [1]
70
70
71 No changes, just a different message:
71 No changes, just a different message:
72
72
73 $ hg ci -v --amend -m 'no changes, new message'
73 $ hg ci -v --amend -m 'no changes, new message'
74 amending changeset 664a9b2d60cd
74 amending changeset 664a9b2d60cd
75 copying changeset 664a9b2d60cd to ad120869acf0
75 copying changeset 664a9b2d60cd to ad120869acf0
76 a
76 a
77 stripping amended changeset 664a9b2d60cd
77 stripping amended changeset 664a9b2d60cd
78 1 changesets found
78 1 changesets found
79 saved backup bundle to $TESTTMP/.hg/strip-backup/664a9b2d60cd-amend-backup.hg
79 saved backup bundle to $TESTTMP/.hg/strip-backup/664a9b2d60cd-amend-backup.hg
80 1 changesets found
80 1 changesets found
81 adding branch
81 adding branch
82 adding changesets
82 adding changesets
83 adding manifests
83 adding manifests
84 adding file changes
84 adding file changes
85 added 1 changesets with 1 changes to 1 files
85 added 1 changesets with 1 changes to 1 files
86 committed changeset 1:ea6e356ff2ad
86 committed changeset 1:ea6e356ff2ad
87 $ hg diff -c .
87 $ hg diff -c .
88 diff -r ad120869acf0 -r ea6e356ff2ad a
88 diff -r ad120869acf0 -r ea6e356ff2ad a
89 --- a/a Thu Jan 01 00:00:00 1970 +0000
89 --- a/a Thu Jan 01 00:00:00 1970 +0000
90 +++ b/a Thu Jan 01 00:00:00 1970 +0000
90 +++ b/a Thu Jan 01 00:00:00 1970 +0000
91 @@ -1,1 +1,3 @@
91 @@ -1,1 +1,3 @@
92 a
92 a
93 +a
93 +a
94 +a
94 +a
95 $ hg log
95 $ hg log
96 changeset: 1:ea6e356ff2ad
96 changeset: 1:ea6e356ff2ad
97 tag: tip
97 tag: tip
98 user: test
98 user: test
99 date: Thu Jan 01 00:00:00 1970 +0000
99 date: Thu Jan 01 00:00:00 1970 +0000
100 summary: no changes, new message
100 summary: no changes, new message
101
101
102 changeset: 0:ad120869acf0
102 changeset: 0:ad120869acf0
103 user: test
103 user: test
104 date: Thu Jan 01 00:00:00 1970 +0000
104 date: Thu Jan 01 00:00:00 1970 +0000
105 summary: base
105 summary: base
106
106
107
107
108 Disable default date on commit so when -d isn't given, the old date is preserved:
108 Disable default date on commit so when -d isn't given, the old date is preserved:
109
109
110 $ echo '[defaults]' >> $HGRCPATH
110 $ echo '[defaults]' >> $HGRCPATH
111 $ echo 'commit=' >> $HGRCPATH
111 $ echo 'commit=' >> $HGRCPATH
112
112
113 Test -u/-d:
113 Test -u/-d:
114
114
115 $ hg ci --amend -u foo -d '1 0'
115 $ hg ci --amend -u foo -d '1 0'
116 saved backup bundle to $TESTTMP/.hg/strip-backup/ea6e356ff2ad-amend-backup.hg
116 saved backup bundle to $TESTTMP/.hg/strip-backup/ea6e356ff2ad-amend-backup.hg
117 $ echo a >> a
117 $ echo a >> a
118 $ hg ci --amend -u foo -d '1 0'
118 $ hg ci --amend -u foo -d '1 0'
119 saved backup bundle to $TESTTMP/.hg/strip-backup/377b91ce8b56-amend-backup.hg
119 saved backup bundle to $TESTTMP/.hg/strip-backup/377b91ce8b56-amend-backup.hg
120 $ hg log -r .
120 $ hg log -r .
121 changeset: 1:2c94e4a5756f
121 changeset: 1:2c94e4a5756f
122 tag: tip
122 tag: tip
123 user: foo
123 user: foo
124 date: Thu Jan 01 00:00:01 1970 +0000
124 date: Thu Jan 01 00:00:01 1970 +0000
125 summary: no changes, new message
125 summary: no changes, new message
126
126
127
127
128 Open editor with old commit message if a message isn't given otherwise:
128 Open editor with old commit message if a message isn't given otherwise:
129
129
130 $ cat > editor << '__EOF__'
130 $ cat > editor << '__EOF__'
131 > #!/bin/sh
131 > #!/bin/sh
132 > cat $1
132 > cat $1
133 > echo "another precious commit message" > "$1"
133 > echo "another precious commit message" > "$1"
134 > __EOF__
134 > __EOF__
135 $ chmod +x editor
135 $ chmod +x editor
136 $ HGEDITOR="'`pwd`'"/editor hg commit --amend -v
136 $ HGEDITOR="'`pwd`'"/editor hg commit --amend -v
137 amending changeset 2c94e4a5756f
137 amending changeset 2c94e4a5756f
138 copying changeset 2c94e4a5756f to ad120869acf0
138 copying changeset 2c94e4a5756f to ad120869acf0
139 no changes, new message
139 no changes, new message
140
140
141
141
142 HG: Enter commit message. Lines beginning with 'HG:' are removed.
142 HG: Enter commit message. Lines beginning with 'HG:' are removed.
143 HG: Leave message empty to abort commit.
143 HG: Leave message empty to abort commit.
144 HG: --
144 HG: --
145 HG: user: foo
145 HG: user: foo
146 HG: branch 'default'
146 HG: branch 'default'
147 HG: changed a
147 HG: changed a
148 a
148 a
149 stripping amended changeset 2c94e4a5756f
149 stripping amended changeset 2c94e4a5756f
150 1 changesets found
150 1 changesets found
151 saved backup bundle to $TESTTMP/.hg/strip-backup/2c94e4a5756f-amend-backup.hg
151 saved backup bundle to $TESTTMP/.hg/strip-backup/2c94e4a5756f-amend-backup.hg
152 1 changesets found
152 1 changesets found
153 adding branch
153 adding branch
154 adding changesets
154 adding changesets
155 adding manifests
155 adding manifests
156 adding file changes
156 adding file changes
157 added 1 changesets with 1 changes to 1 files
157 added 1 changesets with 1 changes to 1 files
158 committed changeset 1:ffb49186f961
158 committed changeset 1:ffb49186f961
159
159
160 Same, but with changes in working dir (different code path):
160 Same, but with changes in working dir (different code path):
161
161
162 $ echo a >> a
162 $ echo a >> a
163 $ HGEDITOR="'`pwd`'"/editor hg commit --amend -v
163 $ HGEDITOR="'`pwd`'"/editor hg commit --amend -v
164 amending changeset ffb49186f961
164 amending changeset ffb49186f961
165 another precious commit message
165 another precious commit message
166
166
167
167
168 HG: Enter commit message. Lines beginning with 'HG:' are removed.
168 HG: Enter commit message. Lines beginning with 'HG:' are removed.
169 HG: Leave message empty to abort commit.
169 HG: Leave message empty to abort commit.
170 HG: --
170 HG: --
171 HG: user: foo
171 HG: user: foo
172 HG: branch 'default'
172 HG: branch 'default'
173 HG: changed a
173 HG: changed a
174 a
174 a
175 copying changeset 27f3aacd3011 to ad120869acf0
175 copying changeset 27f3aacd3011 to ad120869acf0
176 a
176 a
177 stripping intermediate changeset 27f3aacd3011
177 stripping intermediate changeset 27f3aacd3011
178 stripping amended changeset ffb49186f961
178 stripping amended changeset ffb49186f961
179 2 changesets found
179 2 changesets found
180 saved backup bundle to $TESTTMP/.hg/strip-backup/ffb49186f961-amend-backup.hg
180 saved backup bundle to $TESTTMP/.hg/strip-backup/ffb49186f961-amend-backup.hg
181 1 changesets found
181 1 changesets found
182 adding branch
182 adding branch
183 adding changesets
183 adding changesets
184 adding manifests
184 adding manifests
185 adding file changes
185 adding file changes
186 added 1 changesets with 1 changes to 1 files
186 added 1 changesets with 1 changes to 1 files
187 committed changeset 1:fb6cca43446f
187 committed changeset 1:fb6cca43446f
188
188
189 $ rm editor
189 $ rm editor
190 $ hg log -r .
190 $ hg log -r .
191 changeset: 1:fb6cca43446f
191 changeset: 1:fb6cca43446f
192 tag: tip
192 tag: tip
193 user: foo
193 user: foo
194 date: Thu Jan 01 00:00:01 1970 +0000
194 date: Thu Jan 01 00:00:01 1970 +0000
195 summary: another precious commit message
195 summary: another precious commit message
196
196
197
197
198 Moving bookmarks, preserving the active bookmark:
198 Moving bookmarks, preserving the active bookmark:
199
199
200 $ hg book book1
200 $ hg book book1
201 $ hg book book2
201 $ hg book book2
202 $ hg ci --amend -m 'move bookmarks'
202 $ hg ci --amend -m 'move bookmarks'
203 saved backup bundle to $TESTTMP/.hg/strip-backup/fb6cca43446f-amend-backup.hg
203 saved backup bundle to $TESTTMP/.hg/strip-backup/fb6cca43446f-amend-backup.hg
204 $ hg book
204 $ hg book
205 book1 1:0cf1c7a51bcf
205 book1 1:0cf1c7a51bcf
206 * book2 1:0cf1c7a51bcf
206 * book2 1:0cf1c7a51bcf
207 $ echo a >> a
207 $ echo a >> a
208 $ hg ci --amend -m 'move bookmarks'
208 $ hg ci --amend -m 'move bookmarks'
209 saved backup bundle to $TESTTMP/.hg/strip-backup/0cf1c7a51bcf-amend-backup.hg
209 saved backup bundle to $TESTTMP/.hg/strip-backup/0cf1c7a51bcf-amend-backup.hg
210 $ hg book
210 $ hg book
211 book1 1:7344472bd951
211 book1 1:7344472bd951
212 * book2 1:7344472bd951
212 * book2 1:7344472bd951
213
213
214 $ echo '[defaults]' >> $HGRCPATH
214 $ echo '[defaults]' >> $HGRCPATH
215 $ echo "commit=-d '0 0'" >> $HGRCPATH
215 $ echo "commit=-d '0 0'" >> $HGRCPATH
216
216
217 Moving branches:
217 Moving branches:
218
218
219 $ hg branch foo
219 $ hg branch foo
220 marked working directory as branch foo
220 marked working directory as branch foo
221 (branches are permanent and global, did you want a bookmark?)
221 (branches are permanent and global, did you want a bookmark?)
222 $ echo a >> a
222 $ echo a >> a
223 $ hg ci -m 'branch foo'
223 $ hg ci -m 'branch foo'
224 $ hg branch default -f
224 $ hg branch default -f
225 marked working directory as branch default
225 marked working directory as branch default
226 (branches are permanent and global, did you want a bookmark?)
226 (branches are permanent and global, did you want a bookmark?)
227 $ hg ci --amend -m 'back to default'
227 $ hg ci --amend -m 'back to default'
228 saved backup bundle to $TESTTMP/.hg/strip-backup/1661ca36a2db-amend-backup.hg
228 saved backup bundle to $TESTTMP/.hg/strip-backup/1661ca36a2db-amend-backup.hg
229 $ hg branches
229 $ hg branches
230 default 2:f24ee5961967
230 default 2:f24ee5961967
231
231
232 Close branch:
232 Close branch:
233
233
234 $ hg up -q 0
234 $ hg up -q 0
235 $ echo b >> b
235 $ echo b >> b
236 $ hg branch foo
236 $ hg branch foo
237 marked working directory as branch foo
237 marked working directory as branch foo
238 (branches are permanent and global, did you want a bookmark?)
238 (branches are permanent and global, did you want a bookmark?)
239 $ hg ci -Am 'fork'
239 $ hg ci -Am 'fork'
240 adding b
240 adding b
241 $ echo b >> b
241 $ echo b >> b
242 $ hg ci -mb
242 $ hg ci -mb
243 $ hg ci --amend --close-branch -m 'closing branch foo'
243 $ hg ci --amend --close-branch -m 'closing branch foo'
244 saved backup bundle to $TESTTMP/.hg/strip-backup/c962248fa264-amend-backup.hg
244 saved backup bundle to $TESTTMP/.hg/strip-backup/c962248fa264-amend-backup.hg
245
245
246 Same thing, different code path:
246 Same thing, different code path:
247
247
248 $ echo b >> b
248 $ echo b >> b
249 $ hg ci -m 'reopen branch'
249 $ hg ci -m 'reopen branch'
250 created new head
251 reopening closed branch head 4
250 reopening closed branch head 4
252 $ echo b >> b
251 $ echo b >> b
253 $ hg ci --amend --close-branch
252 $ hg ci --amend --close-branch
254 saved backup bundle to $TESTTMP/.hg/strip-backup/5e302dcc12b8-amend-backup.hg
253 saved backup bundle to $TESTTMP/.hg/strip-backup/5e302dcc12b8-amend-backup.hg
255 $ hg branches
254 $ hg branches
256 default 2:f24ee5961967
255 default 2:f24ee5961967
257
256
258 Refuse to amend merges:
257 Refuse to amend merges:
259
258
260 $ hg up -q default
259 $ hg up -q default
261 $ hg merge foo
260 $ hg merge foo
262 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
261 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
263 (branch merge, don't forget to commit)
262 (branch merge, don't forget to commit)
264 $ hg ci --amend
263 $ hg ci --amend
265 abort: cannot amend while merging
264 abort: cannot amend while merging
266 [255]
265 [255]
267 $ hg ci -m 'merge'
266 $ hg ci -m 'merge'
268 $ hg ci --amend
267 $ hg ci --amend
269 abort: cannot amend merge changesets
268 abort: cannot amend merge changesets
270 [255]
269 [255]
271
270
272 Follow copies/renames:
271 Follow copies/renames:
273
272
274 $ hg mv b c
273 $ hg mv b c
275 $ hg ci -m 'b -> c'
274 $ hg ci -m 'b -> c'
276 $ hg mv c d
275 $ hg mv c d
277 $ hg ci --amend -m 'b -> d'
276 $ hg ci --amend -m 'b -> d'
278 saved backup bundle to $TESTTMP/.hg/strip-backup/9c207120aa98-amend-backup.hg
277 saved backup bundle to $TESTTMP/.hg/strip-backup/9c207120aa98-amend-backup.hg
279 $ hg st --rev '.^' --copies d
278 $ hg st --rev '.^' --copies d
280 A d
279 A d
281 b
280 b
282 $ hg cp d e
281 $ hg cp d e
283 $ hg ci -m 'e = d'
282 $ hg ci -m 'e = d'
284 $ hg cp e f
283 $ hg cp e f
285 $ hg ci --amend -m 'f = d'
284 $ hg ci --amend -m 'f = d'
286 saved backup bundle to $TESTTMP/.hg/strip-backup/fda2b3b27b22-amend-backup.hg
285 saved backup bundle to $TESTTMP/.hg/strip-backup/fda2b3b27b22-amend-backup.hg
287 $ hg st --rev '.^' --copies f
286 $ hg st --rev '.^' --copies f
288 A f
287 A f
289 d
288 d
290
289
291 $ mv f f.orig
290 $ mv f f.orig
292 $ hg rm -A f
291 $ hg rm -A f
293 $ hg ci -m removef
292 $ hg ci -m removef
294 $ hg cp a f
293 $ hg cp a f
295 $ mv f.orig f
294 $ mv f.orig f
296 $ hg ci --amend -m replacef
295 $ hg ci --amend -m replacef
297 saved backup bundle to $TESTTMP/.hg/strip-backup/20a7413547f9-amend-backup.hg
296 saved backup bundle to $TESTTMP/.hg/strip-backup/20a7413547f9-amend-backup.hg
298 $ hg st --change . --copies
297 $ hg st --change . --copies
299 $ hg log -r . --template "{file_copies}\n"
298 $ hg log -r . --template "{file_copies}\n"
300
299
301
300
302 Move added file (issue3410):
301 Move added file (issue3410):
303
302
304 $ echo g >> g
303 $ echo g >> g
305 $ hg ci -Am g
304 $ hg ci -Am g
306 adding g
305 adding g
307 $ hg mv g h
306 $ hg mv g h
308 $ hg ci --amend
307 $ hg ci --amend
309 saved backup bundle to $TESTTMP/.hg/strip-backup/5daa77a5d616-amend-backup.hg
308 saved backup bundle to $TESTTMP/.hg/strip-backup/5daa77a5d616-amend-backup.hg
310 $ hg st --change . --copies h
309 $ hg st --change . --copies h
311 A h
310 A h
312 $ hg log -r . --template "{file_copies}\n"
311 $ hg log -r . --template "{file_copies}\n"
313
312
314
313
315 Can't rollback an amend:
314 Can't rollback an amend:
316
315
317 $ hg rollback
316 $ hg rollback
318 no rollback information available
317 no rollback information available
319 [1]
318 [1]
320
319
321 Preserve extra dict (issue3430):
320 Preserve extra dict (issue3430):
322
321
323 $ hg branch a
322 $ hg branch a
324 marked working directory as branch a
323 marked working directory as branch a
325 (branches are permanent and global, did you want a bookmark?)
324 (branches are permanent and global, did you want a bookmark?)
326 $ echo a >> a
325 $ echo a >> a
327 $ hg ci -ma
326 $ hg ci -ma
328 $ hg ci --amend -m "a'"
327 $ hg ci --amend -m "a'"
329 saved backup bundle to $TESTTMP/.hg/strip-backup/167f8e3031df-amend-backup.hg
328 saved backup bundle to $TESTTMP/.hg/strip-backup/167f8e3031df-amend-backup.hg
330 $ hg log -r . --template "{branch}\n"
329 $ hg log -r . --template "{branch}\n"
331 a
330 a
332 $ hg ci --amend -m "a''"
331 $ hg ci --amend -m "a''"
333 saved backup bundle to $TESTTMP/.hg/strip-backup/ceac1a44c806-amend-backup.hg
332 saved backup bundle to $TESTTMP/.hg/strip-backup/ceac1a44c806-amend-backup.hg
334 $ hg log -r . --template "{branch}\n"
333 $ hg log -r . --template "{branch}\n"
335 a
334 a
336
335
337 Also preserve other entries in the dict that are in the old commit;
336 Also preserve other entries in the dict that are in the old commit;
338 first graft something so there's an additional entry:
337 first graft something so there's an additional entry:
339
338
340 $ hg up 0 -q
339 $ hg up 0 -q
341 $ echo z > z
340 $ echo z > z
342 $ hg ci -Am 'fork'
341 $ hg ci -Am 'fork'
343 adding z
342 adding z
344 created new head
343 created new head
345 $ hg up 11
344 $ hg up 11
346 5 files updated, 0 files merged, 1 files removed, 0 files unresolved
345 5 files updated, 0 files merged, 1 files removed, 0 files unresolved
347 $ hg graft 12
346 $ hg graft 12
348 grafting revision 12
347 grafting revision 12
349 $ hg ci --amend -m 'graft amend'
348 $ hg ci --amend -m 'graft amend'
350 saved backup bundle to $TESTTMP/.hg/strip-backup/18a5124daf7a-amend-backup.hg
349 saved backup bundle to $TESTTMP/.hg/strip-backup/18a5124daf7a-amend-backup.hg
351 $ hg log -r . --debug | grep extra
350 $ hg log -r . --debug | grep extra
352 extra: branch=a
351 extra: branch=a
353 extra: source=2647734878ef0236dda712fae9c1651cf694ea8a
352 extra: source=2647734878ef0236dda712fae9c1651cf694ea8a
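
As the grep output above shows, both the branch and the graft source survive the amend. A minimal Python sketch of that behaviour, using hypothetical stand-in values copied from the output (this is not Mercurial's actual amend code):

    # stand-ins for the old changeset's extras and the working directory branch
    old_extra = {'branch': 'a', 'source': '2647734878ef0236dda712fae9c1651cf694ea8a'}
    wctx_branch = 'a'
    new_extra = dict(old_extra)        # carry over every entry from the old commit, e.g. the graft source
    new_extra['branch'] = wctx_branch  # the branch comes from the working directory, which is why
                                       # the earlier "back to default" amend moved the changeset's branch
    print(new_extra)
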
@@ -1,125 +1,124 b''
1 $ branches=.hg/cache/branchheads
1 $ branches=.hg/cache/branchheads
2 $ echo '[extensions]' >> $HGRCPATH
2 $ echo '[extensions]' >> $HGRCPATH
3 $ echo 'mq =' >> $HGRCPATH
3 $ echo 'mq =' >> $HGRCPATH
4
4
5 $ show_branch_cache()
5 $ show_branch_cache()
6 > {
6 > {
7 > # force cache (re)generation
7 > # force cache (re)generation
8 > hg log -r does-not-exist 2> /dev/null
8 > hg log -r does-not-exist 2> /dev/null
9 > hg log -r tip --template 'tip: {rev}\n'
9 > hg log -r tip --template 'tip: {rev}\n'
10 > if [ -f $branches ]; then
10 > if [ -f $branches ]; then
11 > sort $branches
11 > sort $branches
12 > else
12 > else
13 > echo No branch cache
13 > echo No branch cache
14 > fi
14 > fi
15 > if [ "$1" = 1 ]; then
15 > if [ "$1" = 1 ]; then
16 > for b in foo bar; do
16 > for b in foo bar; do
17 > hg log -r $b --template "branch $b: "'{rev}\n'
17 > hg log -r $b --template "branch $b: "'{rev}\n'
18 > done
18 > done
19 > fi
19 > fi
20 > }
20 > }
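
For reading the sorted cache dumps that follow: the branchheads file begins with a "<tip node> <tip rev>" line and then lists one "<head node> <branch name>" line per branch head, as the outputs below show. A minimal Python sketch of parsing it, for illustration only (not Mercurial's own reader; assumes the cache file exists):

    # parse .hg/cache/branchheads as dumped by show_branch_cache
    with open('.hg/cache/branchheads') as f:
        tipnode, tiprev = f.readline().split()                 # first line: node and revision the cache was built for
        heads = [line.split() for line in f if line.strip()]   # then one "<node> <branchname>" line per branch head
    print(tipnode, tiprev)
    print(heads)
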
21
21
22 $ hg init a
22 $ hg init a
23 $ cd a
23 $ cd a
24 $ hg qinit -c
24 $ hg qinit -c
25
25
26
26
27 mq patch on an empty repo
27 mq patch on an empty repo
28
28
29 $ hg qnew -d '0 0' p1
29 $ hg qnew -d '0 0' p1
30 $ show_branch_cache
30 $ show_branch_cache
31 tip: 0
31 tip: 0
32 No branch cache
32 No branch cache
33
33
34 $ echo > pfile
34 $ echo > pfile
35 $ hg add pfile
35 $ hg add pfile
36 $ hg qrefresh -m 'patch 1'
36 $ hg qrefresh -m 'patch 1'
37 $ show_branch_cache
37 $ show_branch_cache
38 tip: 0
38 tip: 0
39 d986d5caac23a7d44a46efc0ddaf5eb9665844cf 0
39 No branch cache
40 d986d5caac23a7d44a46efc0ddaf5eb9665844cf default
41
40
42 some regular revisions
41 some regular revisions
43
42
44 $ hg qpop
43 $ hg qpop
45 popping p1
44 popping p1
46 patch queue now empty
45 patch queue now empty
47 $ echo foo > foo
46 $ echo foo > foo
48 $ hg add foo
47 $ hg add foo
49 $ echo foo > .hg/branch
48 $ echo foo > .hg/branch
50 $ hg ci -m 'branch foo'
49 $ hg ci -m 'branch foo'
51
50
52 $ echo bar > bar
51 $ echo bar > bar
53 $ hg add bar
52 $ hg add bar
54 $ echo bar > .hg/branch
53 $ echo bar > .hg/branch
55 $ hg ci -m 'branch bar'
54 $ hg ci -m 'branch bar'
56 $ show_branch_cache
55 $ show_branch_cache
57 tip: 1
56 tip: 1
58 c229711f16da3d7591f89b1b8d963b79bda22714 1
57 c229711f16da3d7591f89b1b8d963b79bda22714 1
59 c229711f16da3d7591f89b1b8d963b79bda22714 bar
58 c229711f16da3d7591f89b1b8d963b79bda22714 bar
60 dc25e3827021582e979f600811852e36cbe57341 foo
59 dc25e3827021582e979f600811852e36cbe57341 foo
61
60
62 add some mq patches
61 add some mq patches
63
62
64 $ hg qpush
63 $ hg qpush
65 applying p1
64 applying p1
66 now at: p1
65 now at: p1
67 $ show_branch_cache
66 $ show_branch_cache
68 tip: 2
67 tip: 2
69 c229711f16da3d7591f89b1b8d963b79bda22714 1
68 c229711f16da3d7591f89b1b8d963b79bda22714 1
70 c229711f16da3d7591f89b1b8d963b79bda22714 bar
69 c229711f16da3d7591f89b1b8d963b79bda22714 bar
71 dc25e3827021582e979f600811852e36cbe57341 foo
70 dc25e3827021582e979f600811852e36cbe57341 foo
72
71
73 $ hg qnew -d '0 0' p2
72 $ hg qnew -d '0 0' p2
74 $ echo foo > .hg/branch
73 $ echo foo > .hg/branch
75 $ echo foo2 >> foo
74 $ echo foo2 >> foo
76 $ hg qrefresh -m 'patch 2'
75 $ hg qrefresh -m 'patch 2'
77 $ show_branch_cache 1
76 $ show_branch_cache 1
78 tip: 3
77 tip: 3
79 982611f6955f9c48d3365decea203217c945ef0d 2
78 c229711f16da3d7591f89b1b8d963b79bda22714 1
80 982611f6955f9c48d3365decea203217c945ef0d bar
79 c229711f16da3d7591f89b1b8d963b79bda22714 bar
81 dc25e3827021582e979f600811852e36cbe57341 foo
80 dc25e3827021582e979f600811852e36cbe57341 foo
82 branch foo: 3
81 branch foo: 3
83 branch bar: 2
82 branch bar: 2
84
83
85 removing the cache
84 removing the cache
86
85
87 $ rm $branches
86 $ rm $branches
88 $ show_branch_cache 1
87 $ show_branch_cache 1
89 tip: 3
88 tip: 3
90 c229711f16da3d7591f89b1b8d963b79bda22714 1
89 c229711f16da3d7591f89b1b8d963b79bda22714 1
91 c229711f16da3d7591f89b1b8d963b79bda22714 bar
90 c229711f16da3d7591f89b1b8d963b79bda22714 bar
92 dc25e3827021582e979f600811852e36cbe57341 foo
91 dc25e3827021582e979f600811852e36cbe57341 foo
93 branch foo: 3
92 branch foo: 3
94 branch bar: 2
93 branch bar: 2
95
94
96 importing rev 1 (the cache now ends in one of the patches)
95 importing rev 1 (the cache now ends in one of the patches)
97
96
98 $ hg qimport -r 1 -n p0
97 $ hg qimport -r 1 -n p0
99 $ show_branch_cache 1
98 $ show_branch_cache 1
100 tip: 3
99 tip: 3
101 c229711f16da3d7591f89b1b8d963b79bda22714 1
100 c229711f16da3d7591f89b1b8d963b79bda22714 1
102 c229711f16da3d7591f89b1b8d963b79bda22714 bar
101 c229711f16da3d7591f89b1b8d963b79bda22714 bar
103 dc25e3827021582e979f600811852e36cbe57341 foo
102 dc25e3827021582e979f600811852e36cbe57341 foo
104 branch foo: 3
103 branch foo: 3
105 branch bar: 2
104 branch bar: 2
106 $ hg log -r qbase --template 'qbase: {rev}\n'
105 $ hg log -r qbase --template 'qbase: {rev}\n'
107 qbase: 1
106 qbase: 1
108
107
109 detect an invalid cache
108 detect an invalid cache
110
109
111 $ hg qpop -a
110 $ hg qpop -a
112 popping p2
111 popping p2
113 popping p1
112 popping p1
114 popping p0
113 popping p0
115 patch queue now empty
114 patch queue now empty
116 $ hg qpush -a
115 $ hg qpush -a
117 applying p0
116 applying p0
118 applying p1
117 applying p1
119 applying p2
118 applying p2
120 now at: p2
119 now at: p2
121 $ show_branch_cache
120 $ show_branch_cache
122 tip: 3
121 tip: 3
123 dc25e3827021582e979f600811852e36cbe57341 0
122 dc25e3827021582e979f600811852e36cbe57341 0
124 dc25e3827021582e979f600811852e36cbe57341 foo
123 dc25e3827021582e979f600811852e36cbe57341 foo
125
124