##// END OF EJS Templates
rollback: use a hint for force
Matt Mackall -
r15187:0292f88d default
parent child Browse files
Show More
@@ -1,2081 +1,2081 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
class localrepository(repo.repository):
    # Wire-protocol / peer capabilities this repository advertises.
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    # Storage-format requirements this class knows how to read and write.
    supportedformats = set(('revlogv1', 'generaldelta'))
    # All requirements (format plus layout features) this class accepts
    # when opening an existing repository.
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
28
28
    def __init__(self, baseui, path=None, create=False):
        """Open the repository rooted at path; with create=True, first
        initialize a fresh .hg directory there.

        Raises error.RepoError when the repository is missing
        (create=False) or already exists (create=True).
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)    # opener rooted at .hg/
        self.wopener = scmutil.opener(self.root)   # opener rooted at the working dir
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            # Load per-repository configuration and any extensions it enables.
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # A missing/unreadable .hg/hgrc is not an error.
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                # Record what on-disk features the new repo uses, driven
                # by the [format] configuration section.
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                        )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # No requires file: treat as an old-style repo with no
                # special requirements.
                requirements = set()

        # Honor share-extension indirection: .hg/sharedpath, when present,
        # points at the .hg directory actually holding the store.
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        # Branch-head cache state; populated lazily by updatebranchcache().
        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}
112
112
113 def _applyrequirements(self, requirements):
113 def _applyrequirements(self, requirements):
114 self.requirements = requirements
114 self.requirements = requirements
115 openerreqs = set(('revlogv1', 'generaldelta'))
115 openerreqs = set(('revlogv1', 'generaldelta'))
116 self.sopener.options = dict((r, 1) for r in requirements
116 self.sopener.options = dict((r, 1) for r in requirements
117 if r in openerreqs)
117 if r in openerreqs)
118
118
119 def _writerequirements(self):
119 def _writerequirements(self):
120 reqfile = self.opener("requires", "w")
120 reqfile = self.opener("requires", "w")
121 for r in self.requirements:
121 for r in self.requirements:
122 reqfile.write("%s\n" % r)
122 reqfile.write("%s\n" % r)
123 reqfile.close()
123 reqfile.close()
124
124
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        path is an absolute filesystem path; returns True only when it
        names a subrepository (possibly nested) of the current working
        copy, as recorded in the working context's substate.
        """
        if not path.startswith(self.root):
            return False
        # Path relative to the repository root (skip the separator).
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            # Try ever-shorter prefixes of subpath against the substate.
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    # subpath is itself a subrepo of this repository
                    return True
                else:
                    # subpath lives inside a subrepo: delegate the rest
                    # of the check to that subrepo.
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
161
161
    @filecache('bookmarks')
    def _bookmarks(self):
        # Map of bookmark name -> node, cached until the .hg/bookmarks
        # file changes on disk (tracked by the filecache decorator).
        return bookmarks.read(self)
165
165
    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # The currently-active bookmark as recorded in
        # .hg/bookmarks.current; cached until that file changes on disk.
        return bookmarks.readcurrent(self)
169
169
170 @filecache('00changelog.i', True)
170 @filecache('00changelog.i', True)
171 def changelog(self):
171 def changelog(self):
172 c = changelog.changelog(self.sopener)
172 c = changelog.changelog(self.sopener)
173 if 'HG_PENDING' in os.environ:
173 if 'HG_PENDING' in os.environ:
174 p = os.environ['HG_PENDING']
174 p = os.environ['HG_PENDING']
175 if p.startswith(self.root):
175 if p.startswith(self.root):
176 c.readpending('00changelog.i.a')
176 c.readpending('00changelog.i.a')
177 return c
177 return c
178
178
179 @filecache('00manifest.i', True)
179 @filecache('00manifest.i', True)
180 def manifest(self):
180 def manifest(self):
181 return manifest.manifest(self.sopener)
181 return manifest.manifest(self.sopener)
182
182
    @filecache('dirstate')
    def dirstate(self):
        """The dirstate, wired with a validator that replaces an unknown
        working-copy parent with nullid (warning once) instead of
        failing outright."""
        # One-element list used as a mutable cell so the closure below
        # can set the flag (Python 2 has no `nonlocal`).
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
198
198
199 def __getitem__(self, changeid):
199 def __getitem__(self, changeid):
200 if changeid is None:
200 if changeid is None:
201 return context.workingctx(self)
201 return context.workingctx(self)
202 return context.changectx(self, changeid)
202 return context.changectx(self, changeid)
203
203
204 def __contains__(self, changeid):
204 def __contains__(self, changeid):
205 try:
205 try:
206 return bool(self.lookup(changeid))
206 return bool(self.lookup(changeid))
207 except error.RepoLookupError:
207 except error.RepoLookupError:
208 return False
208 return False
209
209
    def __nonzero__(self):
        # A repository object is always truthy, even when it holds no
        # revisions (len() is what reports emptiness).
        return True
212
212
    def __len__(self):
        # Number of revisions in the changelog.
        return len(self.changelog)
215
215
216 def __iter__(self):
216 def __iter__(self):
217 for i in xrange(len(self)):
217 for i in xrange(len(self)):
218 yield i
218 yield i
219
219
220 def set(self, expr, *args):
220 def set(self, expr, *args):
221 '''
221 '''
222 Yield a context for each matching revision, after doing arg
222 Yield a context for each matching revision, after doing arg
223 replacement via revset.formatspec
223 replacement via revset.formatspec
224 '''
224 '''
225
225
226 expr = revset.formatspec(expr, *args)
226 expr = revset.formatspec(expr, *args)
227 m = revset.match(None, expr)
227 m = revset.match(None, expr)
228 for r in m(self, range(len(self))):
228 for r in m(self, range(len(self))):
229 yield self[r]
229 yield self[r]
230
230
231 def url(self):
231 def url(self):
232 return 'file:' + self.root
232 return 'file:' + self.root
233
233
    def hook(self, name, throw=False, **args):
        # Thin wrapper: run the named hook with this repo's ui and repo
        # object; throw and any extra keyword arguments are forwarded
        # to hook.hook unchanged.
        return hook.hook(self.ui, self, name, throw, **args)
236
236
    # Characters that may never appear in a tag name (checked by _tag).
    tag_disallowed = ':\r\n'
238
238
239 def _tag(self, names, node, message, local, user, date, extra={}):
239 def _tag(self, names, node, message, local, user, date, extra={}):
240 if isinstance(names, str):
240 if isinstance(names, str):
241 allchars = names
241 allchars = names
242 names = (names,)
242 names = (names,)
243 else:
243 else:
244 allchars = ''.join(names)
244 allchars = ''.join(names)
245 for c in self.tag_disallowed:
245 for c in self.tag_disallowed:
246 if c in allchars:
246 if c in allchars:
247 raise util.Abort(_('%r cannot be used in a tag name') % c)
247 raise util.Abort(_('%r cannot be used in a tag name') % c)
248
248
249 branches = self.branchmap()
249 branches = self.branchmap()
250 for name in names:
250 for name in names:
251 self.hook('pretag', throw=True, node=hex(node), tag=name,
251 self.hook('pretag', throw=True, node=hex(node), tag=name,
252 local=local)
252 local=local)
253 if name in branches:
253 if name in branches:
254 self.ui.warn(_("warning: tag %s conflicts with existing"
254 self.ui.warn(_("warning: tag %s conflicts with existing"
255 " branch name\n") % name)
255 " branch name\n") % name)
256
256
257 def writetags(fp, names, munge, prevtags):
257 def writetags(fp, names, munge, prevtags):
258 fp.seek(0, 2)
258 fp.seek(0, 2)
259 if prevtags and prevtags[-1] != '\n':
259 if prevtags and prevtags[-1] != '\n':
260 fp.write('\n')
260 fp.write('\n')
261 for name in names:
261 for name in names:
262 m = munge and munge(name) or name
262 m = munge and munge(name) or name
263 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
263 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
264 old = self.tags().get(name, nullid)
264 old = self.tags().get(name, nullid)
265 fp.write('%s %s\n' % (hex(old), m))
265 fp.write('%s %s\n' % (hex(old), m))
266 fp.write('%s %s\n' % (hex(node), m))
266 fp.write('%s %s\n' % (hex(node), m))
267 fp.close()
267 fp.close()
268
268
269 prevtags = ''
269 prevtags = ''
270 if local:
270 if local:
271 try:
271 try:
272 fp = self.opener('localtags', 'r+')
272 fp = self.opener('localtags', 'r+')
273 except IOError:
273 except IOError:
274 fp = self.opener('localtags', 'a')
274 fp = self.opener('localtags', 'a')
275 else:
275 else:
276 prevtags = fp.read()
276 prevtags = fp.read()
277
277
278 # local tags are stored in the current charset
278 # local tags are stored in the current charset
279 writetags(fp, names, None, prevtags)
279 writetags(fp, names, None, prevtags)
280 for name in names:
280 for name in names:
281 self.hook('tag', node=hex(node), tag=name, local=local)
281 self.hook('tag', node=hex(node), tag=name, local=local)
282 return
282 return
283
283
284 try:
284 try:
285 fp = self.wfile('.hgtags', 'rb+')
285 fp = self.wfile('.hgtags', 'rb+')
286 except IOError, e:
286 except IOError, e:
287 if e.errno != errno.ENOENT:
287 if e.errno != errno.ENOENT:
288 raise
288 raise
289 fp = self.wfile('.hgtags', 'ab')
289 fp = self.wfile('.hgtags', 'ab')
290 else:
290 else:
291 prevtags = fp.read()
291 prevtags = fp.read()
292
292
293 # committed tags are stored in UTF-8
293 # committed tags are stored in UTF-8
294 writetags(fp, names, encoding.fromlocal, prevtags)
294 writetags(fp, names, encoding.fromlocal, prevtags)
295
295
296 fp.close()
296 fp.close()
297
297
298 if '.hgtags' not in self.dirstate:
298 if '.hgtags' not in self.dirstate:
299 self[None].add(['.hgtags'])
299 self[None].add(['.hgtags'])
300
300
301 m = matchmod.exact(self.root, '', ['.hgtags'])
301 m = matchmod.exact(self.root, '', ['.hgtags'])
302 tagnode = self.commit(message, user, date, extra=extra, match=m)
302 tagnode = self.commit(message, user, date, extra=extra, match=m)
303
303
304 for name in names:
304 for name in names:
305 self.hook('tag', node=hex(node), tag=name, local=local)
305 self.hook('tag', node=hex(node), tag=name, local=local)
306
306
307 return tagnode
307 return tagnode
308
308
309 def tag(self, names, node, message, local, user, date):
309 def tag(self, names, node, message, local, user, date):
310 '''tag a revision with one or more symbolic names.
310 '''tag a revision with one or more symbolic names.
311
311
312 names is a list of strings or, when adding a single tag, names may be a
312 names is a list of strings or, when adding a single tag, names may be a
313 string.
313 string.
314
314
315 if local is True, the tags are stored in a per-repository file.
315 if local is True, the tags are stored in a per-repository file.
316 otherwise, they are stored in the .hgtags file, and a new
316 otherwise, they are stored in the .hgtags file, and a new
317 changeset is committed with the change.
317 changeset is committed with the change.
318
318
319 keyword arguments:
319 keyword arguments:
320
320
321 local: whether to store tags in non-version-controlled file
321 local: whether to store tags in non-version-controlled file
322 (default False)
322 (default False)
323
323
324 message: commit message to use if committing
324 message: commit message to use if committing
325
325
326 user: name of user to use if committing
326 user: name of user to use if committing
327
327
328 date: date tuple to use if committing'''
328 date: date tuple to use if committing'''
329
329
330 if not local:
330 if not local:
331 for x in self.status()[:5]:
331 for x in self.status()[:5]:
332 if '.hgtags' in x:
332 if '.hgtags' in x:
333 raise util.Abort(_('working copy of .hgtags is changed '
333 raise util.Abort(_('working copy of .hgtags is changed '
334 '(please commit .hgtags manually)'))
334 '(please commit .hgtags manually)'))
335
335
336 self.tags() # instantiate the cache
336 self.tags() # instantiate the cache
337 self._tag(names, node, message, local, user, date)
337 self._tag(names, node, message, local, user, date)
338
338
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # Derived views computed lazily: nodetagscache by
                # nodetags(), tagslist by tagslist().
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
360
360
361 def tags(self):
361 def tags(self):
362 '''return a mapping of tag to node'''
362 '''return a mapping of tag to node'''
363 return self._tagscache.tags
363 return self._tagscache.tags
364
364
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        # alltags maps tag name to (node, hist) as accumulated by the
        # tags module helpers below.
        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        # 'tip' is always present and always points at the newest rev.
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
402
402
403 def tagtype(self, tagname):
403 def tagtype(self, tagname):
404 '''
404 '''
405 return the type of the given tag. result can be:
405 return the type of the given tag. result can be:
406
406
407 'local' : a local tag
407 'local' : a local tag
408 'global' : a global tag
408 'global' : a global tag
409 None : tag does not exist
409 None : tag does not exist
410 '''
410 '''
411
411
412 return self._tagscache.tagtypes.get(tagname)
412 return self._tagscache.tagtypes.get(tagname)
413
413
414 def tagslist(self):
414 def tagslist(self):
415 '''return a list of tags ordered by revision'''
415 '''return a list of tags ordered by revision'''
416 if not self._tagscache.tagslist:
416 if not self._tagscache.tagslist:
417 l = []
417 l = []
418 for t, n in self.tags().iteritems():
418 for t, n in self.tags().iteritems():
419 r = self.changelog.rev(n)
419 r = self.changelog.rev(n)
420 l.append((r, t, n))
420 l.append((r, t, n))
421 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
421 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
422
422
423 return self._tagscache.tagslist
423 return self._tagscache.tagslist
424
424
425 def nodetags(self, node):
425 def nodetags(self, node):
426 '''return the tags associated with a node'''
426 '''return the tags associated with a node'''
427 if not self._tagscache.nodetagscache:
427 if not self._tagscache.nodetagscache:
428 nodetagscache = {}
428 nodetagscache = {}
429 for t, n in self.tags().iteritems():
429 for t, n in self.tags().iteritems():
430 nodetagscache.setdefault(n, []).append(t)
430 nodetagscache.setdefault(n, []).append(t)
431 for tags in nodetagscache.itervalues():
431 for tags in nodetagscache.itervalues():
432 tags.sort()
432 tags.sort()
433 self._tagscache.nodetagscache = nodetagscache
433 self._tagscache.nodetagscache = nodetagscache
434 return self._tagscache.nodetagscache.get(node, [])
434 return self._tagscache.nodetagscache.get(node, [])
435
435
436 def nodebookmarks(self, node):
436 def nodebookmarks(self, node):
437 marks = []
437 marks = []
438 for bookmark, n in self._bookmarks.iteritems():
438 for bookmark, n in self._bookmarks.iteritems():
439 if n == node:
439 if n == node:
440 marks.append(bookmark)
440 marks.append(bookmark)
441 return sorted(marks)
441 return sorted(marks)
442
442
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # Bring the branch-heads cache `partial` (valid up to revision
        # lrev) up to date with the current tip, writing it back to disk
        # whenever new revisions had to be scanned.
        tiprev = len(self) - 1
        if lrev != tiprev:
            # Only walk the revisions added since the cache was valid.
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
452
452
    def updatebranchcache(self):
        """Refresh self._branchcache so it reflects the current tip."""
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # Cache is already current; nothing to do.
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # No usable in-memory cache (first call, or the old tip
            # vanished, e.g. after a strip): reload from disk.
            partial, last, lrev = self._readbranchcache()
        else:
            # In-memory cache is valid up to oldtip; update from there.
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
469
469
470 def branchmap(self):
470 def branchmap(self):
471 '''returns a dictionary {branch: [branchheads]}'''
471 '''returns a dictionary {branch: [branchheads]}'''
472 self.updatebranchcache()
472 self.updatebranchcache()
473 return self._branchcache
473 return self._branchcache
474
474
    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            # Fall back to the newest head even if every head is closed.
            tip = heads[-1]
            for h in reversed(heads):
                # Prefer the newest head that is not marked closed
                # (field 5 of the changelog entry is consulted for the
                # 'close' marker — presumably the changeset's extra
                # metadata; TODO confirm against changelog.read).
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt
487
487
    def _readbranchcache(self):
        """Read the on-disk branch-heads cache (.hg/cache/branchheads).

        Returns (partial, last, lrev): partial maps branch label to a
        list of head nodes, and last/lrev are the tip node and revision
        the cache was valid for.  On a missing or invalid cache the
        empty result ({}, nullid, nullrev) is returned instead.
        """
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            # No cache file: caller starts from scratch.
            return {}, nullid, nullrev

        try:
            # First line: "<tip hex> <tip rev>".
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # Remaining lines: "<head hex> <branch label>".
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # A corrupt cache is not fatal: report it in debug mode and
            # fall back to the empty result so it gets rebuilt.
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
516
516
517 def _writebranchcache(self, branches, tip, tiprev):
517 def _writebranchcache(self, branches, tip, tiprev):
518 try:
518 try:
519 f = self.opener("cache/branchheads", "w", atomictemp=True)
519 f = self.opener("cache/branchheads", "w", atomictemp=True)
520 f.write("%s %s\n" % (hex(tip), tiprev))
520 f.write("%s %s\n" % (hex(tip), tiprev))
521 for label, nodes in branches.iteritems():
521 for label, nodes in branches.iteritems():
522 for node in nodes:
522 for node in nodes:
523 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
523 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
524 f.close()
524 f.close()
525 except (IOError, OSError):
525 except (IOError, OSError):
526 pass
526 pass
527
527
528 def _updatebranchcache(self, partial, ctxgen):
528 def _updatebranchcache(self, partial, ctxgen):
529 # collect new branch entries
529 # collect new branch entries
530 newbranches = {}
530 newbranches = {}
531 for c in ctxgen:
531 for c in ctxgen:
532 newbranches.setdefault(c.branch(), []).append(c.node())
532 newbranches.setdefault(c.branch(), []).append(c.node())
533 # if older branchheads are reachable from new ones, they aren't
533 # if older branchheads are reachable from new ones, they aren't
534 # really branchheads. Note checking parents is insufficient:
534 # really branchheads. Note checking parents is insufficient:
535 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
535 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
536 for branch, newnodes in newbranches.iteritems():
536 for branch, newnodes in newbranches.iteritems():
537 bheads = partial.setdefault(branch, [])
537 bheads = partial.setdefault(branch, [])
538 bheads.extend(newnodes)
538 bheads.extend(newnodes)
539 if len(bheads) <= 1:
539 if len(bheads) <= 1:
540 continue
540 continue
541 bheads = sorted(bheads, key=lambda x: self[x].rev())
541 bheads = sorted(bheads, key=lambda x: self[x].rev())
542 # starting from tip means fewer passes over reachable
542 # starting from tip means fewer passes over reachable
543 while newnodes:
543 while newnodes:
544 latest = newnodes.pop()
544 latest = newnodes.pop()
545 if latest not in bheads:
545 if latest not in bheads:
546 continue
546 continue
547 minbhrev = self[bheads[0]].node()
547 minbhrev = self[bheads[0]].node()
548 reachable = self.changelog.reachable(latest, minbhrev)
548 reachable = self.changelog.reachable(latest, minbhrev)
549 reachable.remove(latest)
549 reachable.remove(latest)
550 if reachable:
550 if reachable:
551 bheads = [b for b in bheads if b not in reachable]
551 bheads = [b for b in bheads if b not in reachable]
552 partial[branch] = bheads
552 partial[branch] = bheads
553
553
554 def lookup(self, key):
554 def lookup(self, key):
555 if isinstance(key, int):
555 if isinstance(key, int):
556 return self.changelog.node(key)
556 return self.changelog.node(key)
557 elif key == '.':
557 elif key == '.':
558 return self.dirstate.p1()
558 return self.dirstate.p1()
559 elif key == 'null':
559 elif key == 'null':
560 return nullid
560 return nullid
561 elif key == 'tip':
561 elif key == 'tip':
562 return self.changelog.tip()
562 return self.changelog.tip()
563 n = self.changelog._match(key)
563 n = self.changelog._match(key)
564 if n:
564 if n:
565 return n
565 return n
566 if key in self._bookmarks:
566 if key in self._bookmarks:
567 return self._bookmarks[key]
567 return self._bookmarks[key]
568 if key in self.tags():
568 if key in self.tags():
569 return self.tags()[key]
569 return self.tags()[key]
570 if key in self.branchtags():
570 if key in self.branchtags():
571 return self.branchtags()[key]
571 return self.branchtags()[key]
572 n = self.changelog._partialmatch(key)
572 n = self.changelog._partialmatch(key)
573 if n:
573 if n:
574 return n
574 return n
575
575
576 # can't find key, check if it might have come from damaged dirstate
576 # can't find key, check if it might have come from damaged dirstate
577 if key in self.dirstate.parents():
577 if key in self.dirstate.parents():
578 raise error.Abort(_("working directory has unknown parent '%s'!")
578 raise error.Abort(_("working directory has unknown parent '%s'!")
579 % short(key))
579 % short(key))
580 try:
580 try:
581 if len(key) == 20:
581 if len(key) == 20:
582 key = hex(key)
582 key = hex(key)
583 except TypeError:
583 except TypeError:
584 pass
584 pass
585 raise error.RepoLookupError(_("unknown revision '%s'") % key)
585 raise error.RepoLookupError(_("unknown revision '%s'") % key)
586
586
587 def lookupbranch(self, key, remote=None):
587 def lookupbranch(self, key, remote=None):
588 repo = remote or self
588 repo = remote or self
589 if key in repo.branchmap():
589 if key in repo.branchmap():
590 return key
590 return key
591
591
592 repo = (remote and remote.local()) and remote or self
592 repo = (remote and remote.local()) and remote or self
593 return repo[key].branch()
593 return repo[key].branch()
594
594
595 def known(self, nodes):
595 def known(self, nodes):
596 nm = self.changelog.nodemap
596 nm = self.changelog.nodemap
597 return [(n in nm) for n in nodes]
597 return [(n in nm) for n in nodes]
598
598
599 def local(self):
599 def local(self):
600 return self
600 return self
601
601
602 def join(self, f):
602 def join(self, f):
603 return os.path.join(self.path, f)
603 return os.path.join(self.path, f)
604
604
605 def wjoin(self, f):
605 def wjoin(self, f):
606 return os.path.join(self.root, f)
606 return os.path.join(self.root, f)
607
607
608 def file(self, f):
608 def file(self, f):
609 if f[0] == '/':
609 if f[0] == '/':
610 f = f[1:]
610 f = f[1:]
611 return filelog.filelog(self.sopener, f)
611 return filelog.filelog(self.sopener, f)
612
612
613 def changectx(self, changeid):
613 def changectx(self, changeid):
614 return self[changeid]
614 return self[changeid]
615
615
616 def parents(self, changeid=None):
616 def parents(self, changeid=None):
617 '''get list of changectxs for parents of changeid'''
617 '''get list of changectxs for parents of changeid'''
618 return self[changeid].parents()
618 return self[changeid].parents()
619
619
620 def filectx(self, path, changeid=None, fileid=None):
620 def filectx(self, path, changeid=None, fileid=None):
621 """changeid can be a changeset revision, node, or tag.
621 """changeid can be a changeset revision, node, or tag.
622 fileid can be a file revision or node."""
622 fileid can be a file revision or node."""
623 return context.filectx(self, path, changeid, fileid)
623 return context.filectx(self, path, changeid, fileid)
624
624
625 def getcwd(self):
625 def getcwd(self):
626 return self.dirstate.getcwd()
626 return self.dirstate.getcwd()
627
627
628 def pathto(self, f, cwd=None):
628 def pathto(self, f, cwd=None):
629 return self.dirstate.pathto(f, cwd)
629 return self.dirstate.pathto(f, cwd)
630
630
631 def wfile(self, f, mode='r'):
631 def wfile(self, f, mode='r'):
632 return self.wopener(f, mode)
632 return self.wopener(f, mode)
633
633
634 def _link(self, f):
634 def _link(self, f):
635 return os.path.islink(self.wjoin(f))
635 return os.path.islink(self.wjoin(f))
636
636
637 def _loadfilter(self, filter):
637 def _loadfilter(self, filter):
638 if filter not in self.filterpats:
638 if filter not in self.filterpats:
639 l = []
639 l = []
640 for pat, cmd in self.ui.configitems(filter):
640 for pat, cmd in self.ui.configitems(filter):
641 if cmd == '!':
641 if cmd == '!':
642 continue
642 continue
643 mf = matchmod.match(self.root, '', [pat])
643 mf = matchmod.match(self.root, '', [pat])
644 fn = None
644 fn = None
645 params = cmd
645 params = cmd
646 for name, filterfn in self._datafilters.iteritems():
646 for name, filterfn in self._datafilters.iteritems():
647 if cmd.startswith(name):
647 if cmd.startswith(name):
648 fn = filterfn
648 fn = filterfn
649 params = cmd[len(name):].lstrip()
649 params = cmd[len(name):].lstrip()
650 break
650 break
651 if not fn:
651 if not fn:
652 fn = lambda s, c, **kwargs: util.filter(s, c)
652 fn = lambda s, c, **kwargs: util.filter(s, c)
653 # Wrap old filters not supporting keyword arguments
653 # Wrap old filters not supporting keyword arguments
654 if not inspect.getargspec(fn)[2]:
654 if not inspect.getargspec(fn)[2]:
655 oldfn = fn
655 oldfn = fn
656 fn = lambda s, c, **kwargs: oldfn(s, c)
656 fn = lambda s, c, **kwargs: oldfn(s, c)
657 l.append((mf, fn, params))
657 l.append((mf, fn, params))
658 self.filterpats[filter] = l
658 self.filterpats[filter] = l
659 return self.filterpats[filter]
659 return self.filterpats[filter]
660
660
661 def _filter(self, filterpats, filename, data):
661 def _filter(self, filterpats, filename, data):
662 for mf, fn, cmd in filterpats:
662 for mf, fn, cmd in filterpats:
663 if mf(filename):
663 if mf(filename):
664 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
664 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
665 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
665 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
666 break
666 break
667
667
668 return data
668 return data
669
669
670 @propertycache
670 @propertycache
671 def _encodefilterpats(self):
671 def _encodefilterpats(self):
672 return self._loadfilter('encode')
672 return self._loadfilter('encode')
673
673
674 @propertycache
674 @propertycache
675 def _decodefilterpats(self):
675 def _decodefilterpats(self):
676 return self._loadfilter('decode')
676 return self._loadfilter('decode')
677
677
678 def adddatafilter(self, name, filter):
678 def adddatafilter(self, name, filter):
679 self._datafilters[name] = filter
679 self._datafilters[name] = filter
680
680
681 def wread(self, filename):
681 def wread(self, filename):
682 if self._link(filename):
682 if self._link(filename):
683 data = os.readlink(self.wjoin(filename))
683 data = os.readlink(self.wjoin(filename))
684 else:
684 else:
685 data = self.wopener.read(filename)
685 data = self.wopener.read(filename)
686 return self._filter(self._encodefilterpats, filename, data)
686 return self._filter(self._encodefilterpats, filename, data)
687
687
688 def wwrite(self, filename, data, flags):
688 def wwrite(self, filename, data, flags):
689 data = self._filter(self._decodefilterpats, filename, data)
689 data = self._filter(self._decodefilterpats, filename, data)
690 if 'l' in flags:
690 if 'l' in flags:
691 self.wopener.symlink(data, filename)
691 self.wopener.symlink(data, filename)
692 else:
692 else:
693 self.wopener.write(filename, data)
693 self.wopener.write(filename, data)
694 if 'x' in flags:
694 if 'x' in flags:
695 util.setflags(self.wjoin(filename), False, True)
695 util.setflags(self.wjoin(filename), False, True)
696
696
697 def wwritedata(self, filename, data):
697 def wwritedata(self, filename, data):
698 return self._filter(self._decodefilterpats, filename, data)
698 return self._filter(self._decodefilterpats, filename, data)
699
699
700 def transaction(self, desc):
700 def transaction(self, desc):
701 tr = self._transref and self._transref() or None
701 tr = self._transref and self._transref() or None
702 if tr and tr.running():
702 if tr and tr.running():
703 return tr.nest()
703 return tr.nest()
704
704
705 # abort here if the journal already exists
705 # abort here if the journal already exists
706 if os.path.exists(self.sjoin("journal")):
706 if os.path.exists(self.sjoin("journal")):
707 raise error.RepoError(
707 raise error.RepoError(
708 _("abandoned transaction found - run hg recover"))
708 _("abandoned transaction found - run hg recover"))
709
709
710 journalfiles = self._writejournal(desc)
710 journalfiles = self._writejournal(desc)
711 renames = [(x, undoname(x)) for x in journalfiles]
711 renames = [(x, undoname(x)) for x in journalfiles]
712
712
713 tr = transaction.transaction(self.ui.warn, self.sopener,
713 tr = transaction.transaction(self.ui.warn, self.sopener,
714 self.sjoin("journal"),
714 self.sjoin("journal"),
715 aftertrans(renames),
715 aftertrans(renames),
716 self.store.createmode)
716 self.store.createmode)
717 self._transref = weakref.ref(tr)
717 self._transref = weakref.ref(tr)
718 return tr
718 return tr
719
719
720 def _writejournal(self, desc):
720 def _writejournal(self, desc):
721 # save dirstate for rollback
721 # save dirstate for rollback
722 try:
722 try:
723 ds = self.opener.read("dirstate")
723 ds = self.opener.read("dirstate")
724 except IOError:
724 except IOError:
725 ds = ""
725 ds = ""
726 self.opener.write("journal.dirstate", ds)
726 self.opener.write("journal.dirstate", ds)
727 self.opener.write("journal.branch",
727 self.opener.write("journal.branch",
728 encoding.fromlocal(self.dirstate.branch()))
728 encoding.fromlocal(self.dirstate.branch()))
729 self.opener.write("journal.desc",
729 self.opener.write("journal.desc",
730 "%d\n%s\n" % (len(self), desc))
730 "%d\n%s\n" % (len(self), desc))
731
731
732 bkname = self.join('bookmarks')
732 bkname = self.join('bookmarks')
733 if os.path.exists(bkname):
733 if os.path.exists(bkname):
734 util.copyfile(bkname, self.join('journal.bookmarks'))
734 util.copyfile(bkname, self.join('journal.bookmarks'))
735 else:
735 else:
736 self.opener.write('journal.bookmarks', '')
736 self.opener.write('journal.bookmarks', '')
737
737
738 return (self.sjoin('journal'), self.join('journal.dirstate'),
738 return (self.sjoin('journal'), self.join('journal.dirstate'),
739 self.join('journal.branch'), self.join('journal.desc'),
739 self.join('journal.branch'), self.join('journal.desc'),
740 self.join('journal.bookmarks'))
740 self.join('journal.bookmarks'))
741
741
742 def recover(self):
742 def recover(self):
743 lock = self.lock()
743 lock = self.lock()
744 try:
744 try:
745 if os.path.exists(self.sjoin("journal")):
745 if os.path.exists(self.sjoin("journal")):
746 self.ui.status(_("rolling back interrupted transaction\n"))
746 self.ui.status(_("rolling back interrupted transaction\n"))
747 transaction.rollback(self.sopener, self.sjoin("journal"),
747 transaction.rollback(self.sopener, self.sjoin("journal"),
748 self.ui.warn)
748 self.ui.warn)
749 self.invalidate()
749 self.invalidate()
750 return True
750 return True
751 else:
751 else:
752 self.ui.warn(_("no interrupted transaction available\n"))
752 self.ui.warn(_("no interrupted transaction available\n"))
753 return False
753 return False
754 finally:
754 finally:
755 lock.release()
755 lock.release()
756
756
757 def rollback(self, dryrun=False, force=False):
757 def rollback(self, dryrun=False, force=False):
758 wlock = lock = None
758 wlock = lock = None
759 try:
759 try:
760 wlock = self.wlock()
760 wlock = self.wlock()
761 lock = self.lock()
761 lock = self.lock()
762 if os.path.exists(self.sjoin("undo")):
762 if os.path.exists(self.sjoin("undo")):
763 return self._rollback(dryrun, force)
763 return self._rollback(dryrun, force)
764 else:
764 else:
765 self.ui.warn(_("no rollback information available\n"))
765 self.ui.warn(_("no rollback information available\n"))
766 return 1
766 return 1
767 finally:
767 finally:
768 release(lock, wlock)
768 release(lock, wlock)
769
769
770 def _rollback(self, dryrun, force):
770 def _rollback(self, dryrun, force):
771 ui = self.ui
771 ui = self.ui
772 try:
772 try:
773 args = self.opener.read('undo.desc').splitlines()
773 args = self.opener.read('undo.desc').splitlines()
774 (oldlen, desc, detail) = (int(args[0]), args[1], None)
774 (oldlen, desc, detail) = (int(args[0]), args[1], None)
775 if len(args) >= 3:
775 if len(args) >= 3:
776 detail = args[2]
776 detail = args[2]
777 oldtip = oldlen - 1
777 oldtip = oldlen - 1
778
778
779 if detail and ui.verbose:
779 if detail and ui.verbose:
780 msg = (_('repository tip rolled back to revision %s'
780 msg = (_('repository tip rolled back to revision %s'
781 ' (undo %s: %s)\n')
781 ' (undo %s: %s)\n')
782 % (oldtip, desc, detail))
782 % (oldtip, desc, detail))
783 else:
783 else:
784 msg = (_('repository tip rolled back to revision %s'
784 msg = (_('repository tip rolled back to revision %s'
785 ' (undo %s)\n')
785 ' (undo %s)\n')
786 % (oldtip, desc))
786 % (oldtip, desc))
787 except IOError:
787 except IOError:
788 msg = _('rolling back unknown transaction\n')
788 msg = _('rolling back unknown transaction\n')
789 desc = None
789 desc = None
790
790
791 if not force and self['.'] != self['tip'] and desc == 'commit':
791 if not force and self['.'] != self['tip'] and desc == 'commit':
792 raise util.Abort(
792 raise util.Abort(
793 _('rollback of last commit while not checked out '
793 _('rollback of last commit while not checked out '
794 'may lose data (use -f to force)'))
794 'may lose data'), hint=_('use -f to force'))
795
795
796 ui.status(msg)
796 ui.status(msg)
797 if dryrun:
797 if dryrun:
798 return 0
798 return 0
799
799
800 parents = self.dirstate.parents()
800 parents = self.dirstate.parents()
801 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
801 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
802 if os.path.exists(self.join('undo.bookmarks')):
802 if os.path.exists(self.join('undo.bookmarks')):
803 util.rename(self.join('undo.bookmarks'),
803 util.rename(self.join('undo.bookmarks'),
804 self.join('bookmarks'))
804 self.join('bookmarks'))
805 self.invalidate()
805 self.invalidate()
806
806
807 parentgone = (parents[0] not in self.changelog.nodemap or
807 parentgone = (parents[0] not in self.changelog.nodemap or
808 parents[1] not in self.changelog.nodemap)
808 parents[1] not in self.changelog.nodemap)
809 if parentgone:
809 if parentgone:
810 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
810 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
811 try:
811 try:
812 branch = self.opener.read('undo.branch')
812 branch = self.opener.read('undo.branch')
813 self.dirstate.setbranch(branch)
813 self.dirstate.setbranch(branch)
814 except IOError:
814 except IOError:
815 ui.warn(_('named branch could not be reset: '
815 ui.warn(_('named branch could not be reset: '
816 'current branch is still \'%s\'\n')
816 'current branch is still \'%s\'\n')
817 % self.dirstate.branch())
817 % self.dirstate.branch())
818
818
819 self.dirstate.invalidate()
819 self.dirstate.invalidate()
820 self.destroyed()
820 self.destroyed()
821 parents = tuple([p.rev() for p in self.parents()])
821 parents = tuple([p.rev() for p in self.parents()])
822 if len(parents) > 1:
822 if len(parents) > 1:
823 ui.status(_('working directory now based on '
823 ui.status(_('working directory now based on '
824 'revisions %d and %d\n') % parents)
824 'revisions %d and %d\n') % parents)
825 else:
825 else:
826 ui.status(_('working directory now based on '
826 ui.status(_('working directory now based on '
827 'revision %d\n') % parents)
827 'revision %d\n') % parents)
828 return 0
828 return 0
829
829
830 def invalidatecaches(self):
830 def invalidatecaches(self):
831 try:
831 try:
832 delattr(self, '_tagscache')
832 delattr(self, '_tagscache')
833 except AttributeError:
833 except AttributeError:
834 pass
834 pass
835
835
836 self._branchcache = None # in UTF-8
836 self._branchcache = None # in UTF-8
837 self._branchcachetip = None
837 self._branchcachetip = None
838
838
839 def invalidatedirstate(self):
839 def invalidatedirstate(self):
840 '''Invalidates the dirstate, causing the next call to dirstate
840 '''Invalidates the dirstate, causing the next call to dirstate
841 to check if it was modified since the last time it was read,
841 to check if it was modified since the last time it was read,
842 rereading it if it has.
842 rereading it if it has.
843
843
844 This is different to dirstate.invalidate() that it doesn't always
844 This is different to dirstate.invalidate() that it doesn't always
845 rereads the dirstate. Use dirstate.invalidate() if you want to
845 rereads the dirstate. Use dirstate.invalidate() if you want to
846 explicitly read the dirstate again (i.e. restoring it to a previous
846 explicitly read the dirstate again (i.e. restoring it to a previous
847 known good state).'''
847 known good state).'''
848 try:
848 try:
849 delattr(self, 'dirstate')
849 delattr(self, 'dirstate')
850 except AttributeError:
850 except AttributeError:
851 pass
851 pass
852
852
853 def invalidate(self):
853 def invalidate(self):
854 for k in self._filecache:
854 for k in self._filecache:
855 # dirstate is invalidated separately in invalidatedirstate()
855 # dirstate is invalidated separately in invalidatedirstate()
856 if k == 'dirstate':
856 if k == 'dirstate':
857 continue
857 continue
858
858
859 try:
859 try:
860 delattr(self, k)
860 delattr(self, k)
861 except AttributeError:
861 except AttributeError:
862 pass
862 pass
863 self.invalidatecaches()
863 self.invalidatecaches()
864
864
865 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
865 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
866 try:
866 try:
867 l = lock.lock(lockname, 0, releasefn, desc=desc)
867 l = lock.lock(lockname, 0, releasefn, desc=desc)
868 except error.LockHeld, inst:
868 except error.LockHeld, inst:
869 if not wait:
869 if not wait:
870 raise
870 raise
871 self.ui.warn(_("waiting for lock on %s held by %r\n") %
871 self.ui.warn(_("waiting for lock on %s held by %r\n") %
872 (desc, inst.locker))
872 (desc, inst.locker))
873 # default to 600 seconds timeout
873 # default to 600 seconds timeout
874 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
874 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
875 releasefn, desc=desc)
875 releasefn, desc=desc)
876 if acquirefn:
876 if acquirefn:
877 acquirefn()
877 acquirefn()
878 return l
878 return l
879
879
880 def lock(self, wait=True):
880 def lock(self, wait=True):
881 '''Lock the repository store (.hg/store) and return a weak reference
881 '''Lock the repository store (.hg/store) and return a weak reference
882 to the lock. Use this before modifying the store (e.g. committing or
882 to the lock. Use this before modifying the store (e.g. committing or
883 stripping). If you are opening a transaction, get a lock as well.)'''
883 stripping). If you are opening a transaction, get a lock as well.)'''
884 l = self._lockref and self._lockref()
884 l = self._lockref and self._lockref()
885 if l is not None and l.held:
885 if l is not None and l.held:
886 l.lock()
886 l.lock()
887 return l
887 return l
888
888
889 def unlock():
889 def unlock():
890 self.store.write()
890 self.store.write()
891 for k, ce in self._filecache.items():
891 for k, ce in self._filecache.items():
892 if k == 'dirstate':
892 if k == 'dirstate':
893 continue
893 continue
894 ce.refresh()
894 ce.refresh()
895
895
896 l = self._lock(self.sjoin("lock"), wait, unlock,
896 l = self._lock(self.sjoin("lock"), wait, unlock,
897 self.invalidate, _('repository %s') % self.origroot)
897 self.invalidate, _('repository %s') % self.origroot)
898 self._lockref = weakref.ref(l)
898 self._lockref = weakref.ref(l)
899 return l
899 return l
900
900
901 def wlock(self, wait=True):
901 def wlock(self, wait=True):
902 '''Lock the non-store parts of the repository (everything under
902 '''Lock the non-store parts of the repository (everything under
903 .hg except .hg/store) and return a weak reference to the lock.
903 .hg except .hg/store) and return a weak reference to the lock.
904 Use this before modifying files in .hg.'''
904 Use this before modifying files in .hg.'''
905 l = self._wlockref and self._wlockref()
905 l = self._wlockref and self._wlockref()
906 if l is not None and l.held:
906 if l is not None and l.held:
907 l.lock()
907 l.lock()
908 return l
908 return l
909
909
910 def unlock():
910 def unlock():
911 self.dirstate.write()
911 self.dirstate.write()
912 ce = self._filecache.get('dirstate')
912 ce = self._filecache.get('dirstate')
913 if ce:
913 if ce:
914 ce.refresh()
914 ce.refresh()
915
915
916 l = self._lock(self.join("wlock"), wait, unlock,
916 l = self._lock(self.join("wlock"), wait, unlock,
917 self.invalidatedirstate, _('working directory of %s') %
917 self.invalidatedirstate, _('working directory of %s') %
918 self.origroot)
918 self.origroot)
919 self._wlockref = weakref.ref(l)
919 self._wlockref = weakref.ref(l)
920 return l
920 return l
921
921
922 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
922 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
923 """
923 """
924 commit an individual file as part of a larger transaction
924 commit an individual file as part of a larger transaction
925 """
925 """
926
926
927 fname = fctx.path()
927 fname = fctx.path()
928 text = fctx.data()
928 text = fctx.data()
929 flog = self.file(fname)
929 flog = self.file(fname)
930 fparent1 = manifest1.get(fname, nullid)
930 fparent1 = manifest1.get(fname, nullid)
931 fparent2 = fparent2o = manifest2.get(fname, nullid)
931 fparent2 = fparent2o = manifest2.get(fname, nullid)
932
932
933 meta = {}
933 meta = {}
934 copy = fctx.renamed()
934 copy = fctx.renamed()
935 if copy and copy[0] != fname:
935 if copy and copy[0] != fname:
936 # Mark the new revision of this file as a copy of another
936 # Mark the new revision of this file as a copy of another
937 # file. This copy data will effectively act as a parent
937 # file. This copy data will effectively act as a parent
938 # of this new revision. If this is a merge, the first
938 # of this new revision. If this is a merge, the first
939 # parent will be the nullid (meaning "look up the copy data")
939 # parent will be the nullid (meaning "look up the copy data")
940 # and the second one will be the other parent. For example:
940 # and the second one will be the other parent. For example:
941 #
941 #
942 # 0 --- 1 --- 3 rev1 changes file foo
942 # 0 --- 1 --- 3 rev1 changes file foo
943 # \ / rev2 renames foo to bar and changes it
943 # \ / rev2 renames foo to bar and changes it
944 # \- 2 -/ rev3 should have bar with all changes and
944 # \- 2 -/ rev3 should have bar with all changes and
945 # should record that bar descends from
945 # should record that bar descends from
946 # bar in rev2 and foo in rev1
946 # bar in rev2 and foo in rev1
947 #
947 #
948 # this allows this merge to succeed:
948 # this allows this merge to succeed:
949 #
949 #
950 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
950 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
951 # \ / merging rev3 and rev4 should use bar@rev2
951 # \ / merging rev3 and rev4 should use bar@rev2
952 # \- 2 --- 4 as the merge base
952 # \- 2 --- 4 as the merge base
953 #
953 #
954
954
955 cfname = copy[0]
955 cfname = copy[0]
956 crev = manifest1.get(cfname)
956 crev = manifest1.get(cfname)
957 newfparent = fparent2
957 newfparent = fparent2
958
958
959 if manifest2: # branch merge
959 if manifest2: # branch merge
960 if fparent2 == nullid or crev is None: # copied on remote side
960 if fparent2 == nullid or crev is None: # copied on remote side
961 if cfname in manifest2:
961 if cfname in manifest2:
962 crev = manifest2[cfname]
962 crev = manifest2[cfname]
963 newfparent = fparent1
963 newfparent = fparent1
964
964
965 # find source in nearest ancestor if we've lost track
965 # find source in nearest ancestor if we've lost track
966 if not crev:
966 if not crev:
967 self.ui.debug(" %s: searching for copy revision for %s\n" %
967 self.ui.debug(" %s: searching for copy revision for %s\n" %
968 (fname, cfname))
968 (fname, cfname))
969 for ancestor in self[None].ancestors():
969 for ancestor in self[None].ancestors():
970 if cfname in ancestor:
970 if cfname in ancestor:
971 crev = ancestor[cfname].filenode()
971 crev = ancestor[cfname].filenode()
972 break
972 break
973
973
974 if crev:
974 if crev:
975 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
975 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
976 meta["copy"] = cfname
976 meta["copy"] = cfname
977 meta["copyrev"] = hex(crev)
977 meta["copyrev"] = hex(crev)
978 fparent1, fparent2 = nullid, newfparent
978 fparent1, fparent2 = nullid, newfparent
979 else:
979 else:
980 self.ui.warn(_("warning: can't find ancestor for '%s' "
980 self.ui.warn(_("warning: can't find ancestor for '%s' "
981 "copied from '%s'!\n") % (fname, cfname))
981 "copied from '%s'!\n") % (fname, cfname))
982
982
983 elif fparent2 != nullid:
983 elif fparent2 != nullid:
984 # is one parent an ancestor of the other?
984 # is one parent an ancestor of the other?
985 fparentancestor = flog.ancestor(fparent1, fparent2)
985 fparentancestor = flog.ancestor(fparent1, fparent2)
986 if fparentancestor == fparent1:
986 if fparentancestor == fparent1:
987 fparent1, fparent2 = fparent2, nullid
987 fparent1, fparent2 = fparent2, nullid
988 elif fparentancestor == fparent2:
988 elif fparentancestor == fparent2:
989 fparent2 = nullid
989 fparent2 = nullid
990
990
991 # is the file changed?
991 # is the file changed?
992 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
992 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
993 changelist.append(fname)
993 changelist.append(fname)
994 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
994 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
995
995
996 # are just the flags changed during merge?
996 # are just the flags changed during merge?
997 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
997 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
998 changelist.append(fname)
998 changelist.append(fname)
999
999
1000 return fparent1
1000 return fparent1
1001
1001
1002 def commit(self, text="", user=None, date=None, match=None, force=False,
1002 def commit(self, text="", user=None, date=None, match=None, force=False,
1003 editor=False, extra={}):
1003 editor=False, extra={}):
1004 """Add a new revision to current repository.
1004 """Add a new revision to current repository.
1005
1005
1006 Revision information is gathered from the working directory,
1006 Revision information is gathered from the working directory,
1007 match can be used to filter the committed files. If editor is
1007 match can be used to filter the committed files. If editor is
1008 supplied, it is called to get a commit message.
1008 supplied, it is called to get a commit message.
1009 """
1009 """
1010
1010
1011 def fail(f, msg):
1011 def fail(f, msg):
1012 raise util.Abort('%s: %s' % (f, msg))
1012 raise util.Abort('%s: %s' % (f, msg))
1013
1013
1014 if not match:
1014 if not match:
1015 match = matchmod.always(self.root, '')
1015 match = matchmod.always(self.root, '')
1016
1016
1017 if not force:
1017 if not force:
1018 vdirs = []
1018 vdirs = []
1019 match.dir = vdirs.append
1019 match.dir = vdirs.append
1020 match.bad = fail
1020 match.bad = fail
1021
1021
1022 wlock = self.wlock()
1022 wlock = self.wlock()
1023 try:
1023 try:
1024 wctx = self[None]
1024 wctx = self[None]
1025 merge = len(wctx.parents()) > 1
1025 merge = len(wctx.parents()) > 1
1026
1026
1027 if (not force and merge and match and
1027 if (not force and merge and match and
1028 (match.files() or match.anypats())):
1028 (match.files() or match.anypats())):
1029 raise util.Abort(_('cannot partially commit a merge '
1029 raise util.Abort(_('cannot partially commit a merge '
1030 '(do not specify files or patterns)'))
1030 '(do not specify files or patterns)'))
1031
1031
1032 changes = self.status(match=match, clean=force)
1032 changes = self.status(match=match, clean=force)
1033 if force:
1033 if force:
1034 changes[0].extend(changes[6]) # mq may commit unchanged files
1034 changes[0].extend(changes[6]) # mq may commit unchanged files
1035
1035
1036 # check subrepos
1036 # check subrepos
1037 subs = []
1037 subs = []
1038 removedsubs = set()
1038 removedsubs = set()
1039 if '.hgsub' in wctx:
1039 if '.hgsub' in wctx:
1040 # only manage subrepos and .hgsubstate if .hgsub is present
1040 # only manage subrepos and .hgsubstate if .hgsub is present
1041 for p in wctx.parents():
1041 for p in wctx.parents():
1042 removedsubs.update(s for s in p.substate if match(s))
1042 removedsubs.update(s for s in p.substate if match(s))
1043 for s in wctx.substate:
1043 for s in wctx.substate:
1044 removedsubs.discard(s)
1044 removedsubs.discard(s)
1045 if match(s) and wctx.sub(s).dirty():
1045 if match(s) and wctx.sub(s).dirty():
1046 subs.append(s)
1046 subs.append(s)
1047 if (subs or removedsubs):
1047 if (subs or removedsubs):
1048 if (not match('.hgsub') and
1048 if (not match('.hgsub') and
1049 '.hgsub' in (wctx.modified() + wctx.added())):
1049 '.hgsub' in (wctx.modified() + wctx.added())):
1050 raise util.Abort(
1050 raise util.Abort(
1051 _("can't commit subrepos without .hgsub"))
1051 _("can't commit subrepos without .hgsub"))
1052 if '.hgsubstate' not in changes[0]:
1052 if '.hgsubstate' not in changes[0]:
1053 changes[0].insert(0, '.hgsubstate')
1053 changes[0].insert(0, '.hgsubstate')
1054 if '.hgsubstate' in changes[2]:
1054 if '.hgsubstate' in changes[2]:
1055 changes[2].remove('.hgsubstate')
1055 changes[2].remove('.hgsubstate')
1056 elif '.hgsub' in changes[2]:
1056 elif '.hgsub' in changes[2]:
1057 # clean up .hgsubstate when .hgsub is removed
1057 # clean up .hgsubstate when .hgsub is removed
1058 if ('.hgsubstate' in wctx and
1058 if ('.hgsubstate' in wctx and
1059 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1059 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1060 changes[2].insert(0, '.hgsubstate')
1060 changes[2].insert(0, '.hgsubstate')
1061
1061
1062 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
1062 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
1063 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1063 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1064 if changedsubs:
1064 if changedsubs:
1065 raise util.Abort(_("uncommitted changes in subrepo %s")
1065 raise util.Abort(_("uncommitted changes in subrepo %s")
1066 % changedsubs[0])
1066 % changedsubs[0])
1067
1067
1068 # make sure all explicit patterns are matched
1068 # make sure all explicit patterns are matched
1069 if not force and match.files():
1069 if not force and match.files():
1070 matched = set(changes[0] + changes[1] + changes[2])
1070 matched = set(changes[0] + changes[1] + changes[2])
1071
1071
1072 for f in match.files():
1072 for f in match.files():
1073 if f == '.' or f in matched or f in wctx.substate:
1073 if f == '.' or f in matched or f in wctx.substate:
1074 continue
1074 continue
1075 if f in changes[3]: # missing
1075 if f in changes[3]: # missing
1076 fail(f, _('file not found!'))
1076 fail(f, _('file not found!'))
1077 if f in vdirs: # visited directory
1077 if f in vdirs: # visited directory
1078 d = f + '/'
1078 d = f + '/'
1079 for mf in matched:
1079 for mf in matched:
1080 if mf.startswith(d):
1080 if mf.startswith(d):
1081 break
1081 break
1082 else:
1082 else:
1083 fail(f, _("no match under directory!"))
1083 fail(f, _("no match under directory!"))
1084 elif f not in self.dirstate:
1084 elif f not in self.dirstate:
1085 fail(f, _("file not tracked!"))
1085 fail(f, _("file not tracked!"))
1086
1086
1087 if (not force and not extra.get("close") and not merge
1087 if (not force and not extra.get("close") and not merge
1088 and not (changes[0] or changes[1] or changes[2])
1088 and not (changes[0] or changes[1] or changes[2])
1089 and wctx.branch() == wctx.p1().branch()):
1089 and wctx.branch() == wctx.p1().branch()):
1090 return None
1090 return None
1091
1091
1092 ms = mergemod.mergestate(self)
1092 ms = mergemod.mergestate(self)
1093 for f in changes[0]:
1093 for f in changes[0]:
1094 if f in ms and ms[f] == 'u':
1094 if f in ms and ms[f] == 'u':
1095 raise util.Abort(_("unresolved merge conflicts "
1095 raise util.Abort(_("unresolved merge conflicts "
1096 "(see hg help resolve)"))
1096 "(see hg help resolve)"))
1097
1097
1098 cctx = context.workingctx(self, text, user, date, extra, changes)
1098 cctx = context.workingctx(self, text, user, date, extra, changes)
1099 if editor:
1099 if editor:
1100 cctx._text = editor(self, cctx, subs)
1100 cctx._text = editor(self, cctx, subs)
1101 edited = (text != cctx._text)
1101 edited = (text != cctx._text)
1102
1102
1103 # commit subs
1103 # commit subs
1104 if subs or removedsubs:
1104 if subs or removedsubs:
1105 state = wctx.substate.copy()
1105 state = wctx.substate.copy()
1106 for s in sorted(subs):
1106 for s in sorted(subs):
1107 sub = wctx.sub(s)
1107 sub = wctx.sub(s)
1108 self.ui.status(_('committing subrepository %s\n') %
1108 self.ui.status(_('committing subrepository %s\n') %
1109 subrepo.subrelpath(sub))
1109 subrepo.subrelpath(sub))
1110 sr = sub.commit(cctx._text, user, date)
1110 sr = sub.commit(cctx._text, user, date)
1111 state[s] = (state[s][0], sr)
1111 state[s] = (state[s][0], sr)
1112 subrepo.writestate(self, state)
1112 subrepo.writestate(self, state)
1113
1113
1114 # Save commit message in case this transaction gets rolled back
1114 # Save commit message in case this transaction gets rolled back
1115 # (e.g. by a pretxncommit hook). Leave the content alone on
1115 # (e.g. by a pretxncommit hook). Leave the content alone on
1116 # the assumption that the user will use the same editor again.
1116 # the assumption that the user will use the same editor again.
1117 msgfn = self.savecommitmessage(cctx._text)
1117 msgfn = self.savecommitmessage(cctx._text)
1118
1118
1119 p1, p2 = self.dirstate.parents()
1119 p1, p2 = self.dirstate.parents()
1120 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1120 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1121 try:
1121 try:
1122 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1122 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1123 ret = self.commitctx(cctx, True)
1123 ret = self.commitctx(cctx, True)
1124 except:
1124 except:
1125 if edited:
1125 if edited:
1126 self.ui.write(
1126 self.ui.write(
1127 _('note: commit message saved in %s\n') % msgfn)
1127 _('note: commit message saved in %s\n') % msgfn)
1128 raise
1128 raise
1129
1129
1130 # update bookmarks, dirstate and mergestate
1130 # update bookmarks, dirstate and mergestate
1131 bookmarks.update(self, p1, ret)
1131 bookmarks.update(self, p1, ret)
1132 for f in changes[0] + changes[1]:
1132 for f in changes[0] + changes[1]:
1133 self.dirstate.normal(f)
1133 self.dirstate.normal(f)
1134 for f in changes[2]:
1134 for f in changes[2]:
1135 self.dirstate.drop(f)
1135 self.dirstate.drop(f)
1136 self.dirstate.setparents(ret)
1136 self.dirstate.setparents(ret)
1137 ms.reset()
1137 ms.reset()
1138 finally:
1138 finally:
1139 wlock.release()
1139 wlock.release()
1140
1140
1141 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1141 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1142 return ret
1142 return ret
1143
1143
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        If error is True, this commit is salvaging an earlier failure and
        missing files (IOError/ENOENT) are fatal instead of being treated
        as removals.  Returns the new changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # a weak proxy avoids keeping the transaction alive through
            # the revlog references stored during the commit
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                # the new changeset will be appended at the current length,
                # so that is the linkrev for every filelog entry we add
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        # a vanished file is tolerated (recorded as removed)
                        # unless we are committing after an error
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                # only report removals that actually exist in a parent
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changes: reuse p1's manifest unchanged
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # lets hooks see the pending (not yet finalized) changelog entry
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1217
1217
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.

        # drops all in-memory caches (tags, branch cache, ...) so they
        # get rebuilt lazily against the rewritten history
        self.invalidatecaches()
1236
1236
1237 def walk(self, match, node=None):
1237 def walk(self, match, node=None):
1238 '''
1238 '''
1239 walk recursively through the directory tree or a given
1239 walk recursively through the directory tree or a given
1240 changeset, finding all files matched by the match
1240 changeset, finding all files matched by the match
1241 function
1241 function
1242 '''
1242 '''
1243 return self[node].walk(match)
1243 return self[node].walk(match)
1244
1244
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The ignored/clean/unknown lists are only populated when the
        corresponding keyword argument is True.
        """

        def mfmatches(ctx):
            # restrict a manifest copy to the files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # only warn about files that exist in neither context
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            # cmp holds files whose status could not be decided from
            # stat data alone and need a content comparison
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    # None marks "hash unknown, compare by content later"
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # walk mf2, consuming mf1: anything left in mf1 afterwards
            # only exists on the mf1 side and is therefore removed
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    # fold subrepo results in, prefixing each file with
                    # the subrepo path
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1375
1375
1376 def heads(self, start=None):
1376 def heads(self, start=None):
1377 heads = self.changelog.heads(start)
1377 heads = self.changelog.heads(start)
1378 # sort the output in rev descending order
1378 # sort the output in rev descending order
1379 return sorted(heads, key=self.changelog.rev, reverse=True)
1379 return sorted(heads, key=self.changelog.rev, reverse=True)
1380
1380
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            # field 5 of a changelog entry is the extra dict; a 'close'
            # key there marks a closed branch head
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads
1404
1404
1405 def branches(self, nodes):
1405 def branches(self, nodes):
1406 if not nodes:
1406 if not nodes:
1407 nodes = [self.changelog.tip()]
1407 nodes = [self.changelog.tip()]
1408 b = []
1408 b = []
1409 for n in nodes:
1409 for n in nodes:
1410 t = n
1410 t = n
1411 while True:
1411 while True:
1412 p = self.changelog.parents(n)
1412 p = self.changelog.parents(n)
1413 if p[1] != nullid or p[0] == nullid:
1413 if p[1] != nullid or p[0] == nullid:
1414 b.append((t, n, p[0], p[1]))
1414 b.append((t, n, p[0], p[1]))
1415 break
1415 break
1416 n = p[0]
1416 n = p[0]
1417 return b
1417 return b
1418
1418
1419 def between(self, pairs):
1419 def between(self, pairs):
1420 r = []
1420 r = []
1421
1421
1422 for top, bottom in pairs:
1422 for top, bottom in pairs:
1423 n, l, i = top, [], 0
1423 n, l, i = top, [], 0
1424 f = 1
1424 f = 1
1425
1425
1426 while n != bottom and n != nullid:
1426 while n != bottom and n != nullid:
1427 p = self.changelog.parents(n)[0]
1427 p = self.changelog.parents(n)[0]
1428 if i == f:
1428 if i == f:
1429 l.append(n)
1429 l.append(n)
1430 f = f * 2
1430 f = f * 2
1431 n = p
1431 n = p
1432 i += 1
1432 i += 1
1433
1433
1434 r.append(l)
1434 r.append(l)
1435
1435
1436 return r
1436 return r
1437
1437
    def pull(self, remote, heads=None, force=False):
        """Pull changesets from the remote repository into this one.

        heads optionally limits the pull to the given remote heads;
        force is passed through to discovery.  Returns 0 when there was
        nothing to fetch, otherwise the return value of addchangegroup().
        """
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # prefer the newest protocol the remote supports, falling
                # back to older changegroup commands
                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result
1471
1471
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.

        The base implementation is intentionally a no-op; overrides are
        expected to raise (e.g. util.Abort) to veto the push.
        """
        pass
1478
1478
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # give extensions a chance to veto the push before any work
        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        # after the changesets, advance any bookmarks the remote shares
        # with us, but only when the local position is a descendant of
        # the remote one (i.e. a fast-forward)
        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1540
1540
1541 def changegroupinfo(self, nodes, source):
1541 def changegroupinfo(self, nodes, source):
1542 if self.ui.verbose or source == 'bundle':
1542 if self.ui.verbose or source == 'bundle':
1543 self.ui.status(_("%d changesets found\n") % len(nodes))
1543 self.ui.status(_("%d changesets found\n") % len(nodes))
1544 if self.ui.debugflag:
1544 if self.ui.debugflag:
1545 self.ui.debug("list of changesets:\n")
1545 self.ui.debug("list of changesets:\n")
1546 for node in nodes:
1546 for node in nodes:
1547 self.ui.debug("%s\n" % hex(node))
1547 self.ui.debug("%s\n" % hex(node))
1548
1548
1549 def changegroupsubset(self, bases, heads, source):
1549 def changegroupsubset(self, bases, heads, source):
1550 """Compute a changegroup consisting of all the nodes that are
1550 """Compute a changegroup consisting of all the nodes that are
1551 descendants of any of the bases and ancestors of any of the heads.
1551 descendants of any of the bases and ancestors of any of the heads.
1552 Return a chunkbuffer object whose read() method will return
1552 Return a chunkbuffer object whose read() method will return
1553 successive changegroup chunks.
1553 successive changegroup chunks.
1554
1554
1555 It is fairly complex as determining which filenodes and which
1555 It is fairly complex as determining which filenodes and which
1556 manifest nodes need to be included for the changeset to be complete
1556 manifest nodes need to be included for the changeset to be complete
1557 is non-trivial.
1557 is non-trivial.
1558
1558
1559 Another wrinkle is doing the reverse, figuring out which changeset in
1559 Another wrinkle is doing the reverse, figuring out which changeset in
1560 the changegroup a particular filenode or manifestnode belongs to.
1560 the changegroup a particular filenode or manifestnode belongs to.
1561 """
1561 """
1562 cl = self.changelog
1562 cl = self.changelog
1563 if not bases:
1563 if not bases:
1564 bases = [nullid]
1564 bases = [nullid]
1565 csets, bases, heads = cl.nodesbetween(bases, heads)
1565 csets, bases, heads = cl.nodesbetween(bases, heads)
1566 # We assume that all ancestors of bases are known
1566 # We assume that all ancestors of bases are known
1567 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1567 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1568 return self._changegroupsubset(common, csets, heads, source)
1568 return self._changegroupsubset(common, csets, heads, source)
1569
1569
1570 def getbundle(self, source, heads=None, common=None):
1570 def getbundle(self, source, heads=None, common=None):
1571 """Like changegroupsubset, but returns the set difference between the
1571 """Like changegroupsubset, but returns the set difference between the
1572 ancestors of heads and the ancestors common.
1572 ancestors of heads and the ancestors common.
1573
1573
1574 If heads is None, use the local heads. If common is None, use [nullid].
1574 If heads is None, use the local heads. If common is None, use [nullid].
1575
1575
1576 The nodes in common might not all be known locally due to the way the
1576 The nodes in common might not all be known locally due to the way the
1577 current discovery protocol works.
1577 current discovery protocol works.
1578 """
1578 """
1579 cl = self.changelog
1579 cl = self.changelog
1580 if common:
1580 if common:
1581 nm = cl.nodemap
1581 nm = cl.nodemap
1582 common = [n for n in common if n in nm]
1582 common = [n for n in common if n in nm]
1583 else:
1583 else:
1584 common = [nullid]
1584 common = [nullid]
1585 if not heads:
1585 if not heads:
1586 heads = cl.heads()
1586 heads = cl.heads()
1587 common, missing = cl.findcommonmissing(common, heads)
1587 common, missing = cl.findcommonmissing(common, heads)
1588 if not missing:
1588 if not missing:
1589 return None
1589 return None
1590 return self._changegroupsubset(common, missing, heads, source)
1590 return self._changegroupsubset(common, missing, heads, source)
1591
1591
1592 def _changegroupsubset(self, commonrevs, csets, heads, source):
1592 def _changegroupsubset(self, commonrevs, csets, heads, source):
1593
1593
1594 cl = self.changelog
1594 cl = self.changelog
1595 mf = self.manifest
1595 mf = self.manifest
1596 mfs = {} # needed manifests
1596 mfs = {} # needed manifests
1597 fnodes = {} # needed file nodes
1597 fnodes = {} # needed file nodes
1598 changedfiles = set()
1598 changedfiles = set()
1599 fstate = ['', {}]
1599 fstate = ['', {}]
1600 count = [0]
1600 count = [0]
1601
1601
1602 # can we go through the fast path ?
1602 # can we go through the fast path ?
1603 heads.sort()
1603 heads.sort()
1604 if heads == sorted(self.heads()):
1604 if heads == sorted(self.heads()):
1605 return self._changegroup(csets, source)
1605 return self._changegroup(csets, source)
1606
1606
1607 # slow path
1607 # slow path
1608 self.hook('preoutgoing', throw=True, source=source)
1608 self.hook('preoutgoing', throw=True, source=source)
1609 self.changegroupinfo(csets, source)
1609 self.changegroupinfo(csets, source)
1610
1610
1611 # filter any nodes that claim to be part of the known set
1611 # filter any nodes that claim to be part of the known set
1612 def prune(revlog, missing):
1612 def prune(revlog, missing):
1613 return [n for n in missing
1613 return [n for n in missing
1614 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1614 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1615
1615
1616 def lookup(revlog, x):
1616 def lookup(revlog, x):
1617 if revlog == cl:
1617 if revlog == cl:
1618 c = cl.read(x)
1618 c = cl.read(x)
1619 changedfiles.update(c[3])
1619 changedfiles.update(c[3])
1620 mfs.setdefault(c[0], x)
1620 mfs.setdefault(c[0], x)
1621 count[0] += 1
1621 count[0] += 1
1622 self.ui.progress(_('bundling'), count[0],
1622 self.ui.progress(_('bundling'), count[0],
1623 unit=_('changesets'), total=len(csets))
1623 unit=_('changesets'), total=len(csets))
1624 return x
1624 return x
1625 elif revlog == mf:
1625 elif revlog == mf:
1626 clnode = mfs[x]
1626 clnode = mfs[x]
1627 mdata = mf.readfast(x)
1627 mdata = mf.readfast(x)
1628 for f in changedfiles:
1628 for f in changedfiles:
1629 if f in mdata:
1629 if f in mdata:
1630 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1630 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1631 count[0] += 1
1631 count[0] += 1
1632 self.ui.progress(_('bundling'), count[0],
1632 self.ui.progress(_('bundling'), count[0],
1633 unit=_('manifests'), total=len(mfs))
1633 unit=_('manifests'), total=len(mfs))
1634 return mfs[x]
1634 return mfs[x]
1635 else:
1635 else:
1636 self.ui.progress(
1636 self.ui.progress(
1637 _('bundling'), count[0], item=fstate[0],
1637 _('bundling'), count[0], item=fstate[0],
1638 unit=_('files'), total=len(changedfiles))
1638 unit=_('files'), total=len(changedfiles))
1639 return fstate[1][x]
1639 return fstate[1][x]
1640
1640
1641 bundler = changegroup.bundle10(lookup)
1641 bundler = changegroup.bundle10(lookup)
1642 reorder = self.ui.config('bundle', 'reorder', 'auto')
1642 reorder = self.ui.config('bundle', 'reorder', 'auto')
1643 if reorder == 'auto':
1643 if reorder == 'auto':
1644 reorder = None
1644 reorder = None
1645 else:
1645 else:
1646 reorder = util.parsebool(reorder)
1646 reorder = util.parsebool(reorder)
1647
1647
1648 def gengroup():
1648 def gengroup():
1649 # Create a changenode group generator that will call our functions
1649 # Create a changenode group generator that will call our functions
1650 # back to lookup the owning changenode and collect information.
1650 # back to lookup the owning changenode and collect information.
1651 for chunk in cl.group(csets, bundler, reorder=reorder):
1651 for chunk in cl.group(csets, bundler, reorder=reorder):
1652 yield chunk
1652 yield chunk
1653 self.ui.progress(_('bundling'), None)
1653 self.ui.progress(_('bundling'), None)
1654
1654
1655 # Create a generator for the manifestnodes that calls our lookup
1655 # Create a generator for the manifestnodes that calls our lookup
1656 # and data collection functions back.
1656 # and data collection functions back.
1657 count[0] = 0
1657 count[0] = 0
1658 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1658 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1659 yield chunk
1659 yield chunk
1660 self.ui.progress(_('bundling'), None)
1660 self.ui.progress(_('bundling'), None)
1661
1661
1662 mfs.clear()
1662 mfs.clear()
1663
1663
1664 # Go through all our files in order sorted by name.
1664 # Go through all our files in order sorted by name.
1665 count[0] = 0
1665 count[0] = 0
1666 for fname in sorted(changedfiles):
1666 for fname in sorted(changedfiles):
1667 filerevlog = self.file(fname)
1667 filerevlog = self.file(fname)
1668 if not len(filerevlog):
1668 if not len(filerevlog):
1669 raise util.Abort(_("empty or missing revlog for %s") % fname)
1669 raise util.Abort(_("empty or missing revlog for %s") % fname)
1670 fstate[0] = fname
1670 fstate[0] = fname
1671 fstate[1] = fnodes.pop(fname, {})
1671 fstate[1] = fnodes.pop(fname, {})
1672
1672
1673 nodelist = prune(filerevlog, fstate[1])
1673 nodelist = prune(filerevlog, fstate[1])
1674 if nodelist:
1674 if nodelist:
1675 count[0] += 1
1675 count[0] += 1
1676 yield bundler.fileheader(fname)
1676 yield bundler.fileheader(fname)
1677 for chunk in filerevlog.group(nodelist, bundler, reorder):
1677 for chunk in filerevlog.group(nodelist, bundler, reorder):
1678 yield chunk
1678 yield chunk
1679
1679
1680 # Signal that no more groups are left.
1680 # Signal that no more groups are left.
1681 yield bundler.close()
1681 yield bundler.close()
1682 self.ui.progress(_('bundling'), None)
1682 self.ui.progress(_('bundling'), None)
1683
1683
1684 if csets:
1684 if csets:
1685 self.hook('outgoing', node=hex(csets[0]), source=source)
1685 self.hook('outgoing', node=hex(csets[0]), source=source)
1686
1686
1687 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1687 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1688
1688
1689 def changegroup(self, basenodes, source):
1689 def changegroup(self, basenodes, source):
1690 # to avoid a race we use changegroupsubset() (issue1320)
1690 # to avoid a race we use changegroupsubset() (issue1320)
1691 return self.changegroupsubset(basenodes, self.heads(), source)
1691 return self.changegroupsubset(basenodes, self.heads(), source)
1692
1692
1693 def _changegroup(self, nodes, source):
1693 def _changegroup(self, nodes, source):
1694 """Compute the changegroup of all nodes that we have that a recipient
1694 """Compute the changegroup of all nodes that we have that a recipient
1695 doesn't. Return a chunkbuffer object whose read() method will return
1695 doesn't. Return a chunkbuffer object whose read() method will return
1696 successive changegroup chunks.
1696 successive changegroup chunks.
1697
1697
1698 This is much easier than the previous function as we can assume that
1698 This is much easier than the previous function as we can assume that
1699 the recipient has any changenode we aren't sending them.
1699 the recipient has any changenode we aren't sending them.
1700
1700
1701 nodes is the set of nodes to send"""
1701 nodes is the set of nodes to send"""
1702
1702
1703 cl = self.changelog
1703 cl = self.changelog
1704 mf = self.manifest
1704 mf = self.manifest
1705 mfs = {}
1705 mfs = {}
1706 changedfiles = set()
1706 changedfiles = set()
1707 fstate = ['']
1707 fstate = ['']
1708 count = [0]
1708 count = [0]
1709
1709
1710 self.hook('preoutgoing', throw=True, source=source)
1710 self.hook('preoutgoing', throw=True, source=source)
1711 self.changegroupinfo(nodes, source)
1711 self.changegroupinfo(nodes, source)
1712
1712
1713 revset = set([cl.rev(n) for n in nodes])
1713 revset = set([cl.rev(n) for n in nodes])
1714
1714
1715 def gennodelst(log):
1715 def gennodelst(log):
1716 return [log.node(r) for r in log if log.linkrev(r) in revset]
1716 return [log.node(r) for r in log if log.linkrev(r) in revset]
1717
1717
1718 def lookup(revlog, x):
1718 def lookup(revlog, x):
1719 if revlog == cl:
1719 if revlog == cl:
1720 c = cl.read(x)
1720 c = cl.read(x)
1721 changedfiles.update(c[3])
1721 changedfiles.update(c[3])
1722 mfs.setdefault(c[0], x)
1722 mfs.setdefault(c[0], x)
1723 count[0] += 1
1723 count[0] += 1
1724 self.ui.progress(_('bundling'), count[0],
1724 self.ui.progress(_('bundling'), count[0],
1725 unit=_('changesets'), total=len(nodes))
1725 unit=_('changesets'), total=len(nodes))
1726 return x
1726 return x
1727 elif revlog == mf:
1727 elif revlog == mf:
1728 count[0] += 1
1728 count[0] += 1
1729 self.ui.progress(_('bundling'), count[0],
1729 self.ui.progress(_('bundling'), count[0],
1730 unit=_('manifests'), total=len(mfs))
1730 unit=_('manifests'), total=len(mfs))
1731 return cl.node(revlog.linkrev(revlog.rev(x)))
1731 return cl.node(revlog.linkrev(revlog.rev(x)))
1732 else:
1732 else:
1733 self.ui.progress(
1733 self.ui.progress(
1734 _('bundling'), count[0], item=fstate[0],
1734 _('bundling'), count[0], item=fstate[0],
1735 total=len(changedfiles), unit=_('files'))
1735 total=len(changedfiles), unit=_('files'))
1736 return cl.node(revlog.linkrev(revlog.rev(x)))
1736 return cl.node(revlog.linkrev(revlog.rev(x)))
1737
1737
1738 bundler = changegroup.bundle10(lookup)
1738 bundler = changegroup.bundle10(lookup)
1739 reorder = self.ui.config('bundle', 'reorder', 'auto')
1739 reorder = self.ui.config('bundle', 'reorder', 'auto')
1740 if reorder == 'auto':
1740 if reorder == 'auto':
1741 reorder = None
1741 reorder = None
1742 else:
1742 else:
1743 reorder = util.parsebool(reorder)
1743 reorder = util.parsebool(reorder)
1744
1744
1745 def gengroup():
1745 def gengroup():
1746 '''yield a sequence of changegroup chunks (strings)'''
1746 '''yield a sequence of changegroup chunks (strings)'''
1747 # construct a list of all changed files
1747 # construct a list of all changed files
1748
1748
1749 for chunk in cl.group(nodes, bundler, reorder=reorder):
1749 for chunk in cl.group(nodes, bundler, reorder=reorder):
1750 yield chunk
1750 yield chunk
1751 self.ui.progress(_('bundling'), None)
1751 self.ui.progress(_('bundling'), None)
1752
1752
1753 count[0] = 0
1753 count[0] = 0
1754 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1754 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1755 yield chunk
1755 yield chunk
1756 self.ui.progress(_('bundling'), None)
1756 self.ui.progress(_('bundling'), None)
1757
1757
1758 count[0] = 0
1758 count[0] = 0
1759 for fname in sorted(changedfiles):
1759 for fname in sorted(changedfiles):
1760 filerevlog = self.file(fname)
1760 filerevlog = self.file(fname)
1761 if not len(filerevlog):
1761 if not len(filerevlog):
1762 raise util.Abort(_("empty or missing revlog for %s") % fname)
1762 raise util.Abort(_("empty or missing revlog for %s") % fname)
1763 fstate[0] = fname
1763 fstate[0] = fname
1764 nodelist = gennodelst(filerevlog)
1764 nodelist = gennodelst(filerevlog)
1765 if nodelist:
1765 if nodelist:
1766 count[0] += 1
1766 count[0] += 1
1767 yield bundler.fileheader(fname)
1767 yield bundler.fileheader(fname)
1768 for chunk in filerevlog.group(nodelist, bundler, reorder):
1768 for chunk in filerevlog.group(nodelist, bundler, reorder):
1769 yield chunk
1769 yield chunk
1770 yield bundler.close()
1770 yield bundler.close()
1771 self.ui.progress(_('bundling'), None)
1771 self.ui.progress(_('bundling'), None)
1772
1772
1773 if nodes:
1773 if nodes:
1774 self.hook('outgoing', node=hex(nodes[0]), source=source)
1774 self.hook('outgoing', node=hex(nodes[0]), source=source)
1775
1775
1776 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1776 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1777
1777
1778 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1778 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1779 """Add the changegroup returned by source.read() to this repo.
1779 """Add the changegroup returned by source.read() to this repo.
1780 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1780 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1781 the URL of the repo where this changegroup is coming from.
1781 the URL of the repo where this changegroup is coming from.
1782 If lock is not None, the function takes ownership of the lock
1782 If lock is not None, the function takes ownership of the lock
1783 and releases it after the changegroup is added.
1783 and releases it after the changegroup is added.
1784
1784
1785 Return an integer summarizing the change to this repo:
1785 Return an integer summarizing the change to this repo:
1786 - nothing changed or no source: 0
1786 - nothing changed or no source: 0
1787 - more heads than before: 1+added heads (2..n)
1787 - more heads than before: 1+added heads (2..n)
1788 - fewer heads than before: -1-removed heads (-2..-n)
1788 - fewer heads than before: -1-removed heads (-2..-n)
1789 - number of heads stays the same: 1
1789 - number of heads stays the same: 1
1790 """
1790 """
1791 def csmap(x):
1791 def csmap(x):
1792 self.ui.debug("add changeset %s\n" % short(x))
1792 self.ui.debug("add changeset %s\n" % short(x))
1793 return len(cl)
1793 return len(cl)
1794
1794
1795 def revmap(x):
1795 def revmap(x):
1796 return cl.rev(x)
1796 return cl.rev(x)
1797
1797
1798 if not source:
1798 if not source:
1799 return 0
1799 return 0
1800
1800
1801 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1801 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1802
1802
1803 changesets = files = revisions = 0
1803 changesets = files = revisions = 0
1804 efiles = set()
1804 efiles = set()
1805
1805
1806 # write changelog data to temp files so concurrent readers will not see
1806 # write changelog data to temp files so concurrent readers will not see
1807 # inconsistent view
1807 # inconsistent view
1808 cl = self.changelog
1808 cl = self.changelog
1809 cl.delayupdate()
1809 cl.delayupdate()
1810 oldheads = cl.heads()
1810 oldheads = cl.heads()
1811
1811
1812 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1812 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1813 try:
1813 try:
1814 trp = weakref.proxy(tr)
1814 trp = weakref.proxy(tr)
1815 # pull off the changeset group
1815 # pull off the changeset group
1816 self.ui.status(_("adding changesets\n"))
1816 self.ui.status(_("adding changesets\n"))
1817 clstart = len(cl)
1817 clstart = len(cl)
1818 class prog(object):
1818 class prog(object):
1819 step = _('changesets')
1819 step = _('changesets')
1820 count = 1
1820 count = 1
1821 ui = self.ui
1821 ui = self.ui
1822 total = None
1822 total = None
1823 def __call__(self):
1823 def __call__(self):
1824 self.ui.progress(self.step, self.count, unit=_('chunks'),
1824 self.ui.progress(self.step, self.count, unit=_('chunks'),
1825 total=self.total)
1825 total=self.total)
1826 self.count += 1
1826 self.count += 1
1827 pr = prog()
1827 pr = prog()
1828 source.callback = pr
1828 source.callback = pr
1829
1829
1830 source.changelogheader()
1830 source.changelogheader()
1831 if (cl.addgroup(source, csmap, trp) is None
1831 if (cl.addgroup(source, csmap, trp) is None
1832 and not emptyok):
1832 and not emptyok):
1833 raise util.Abort(_("received changelog group is empty"))
1833 raise util.Abort(_("received changelog group is empty"))
1834 clend = len(cl)
1834 clend = len(cl)
1835 changesets = clend - clstart
1835 changesets = clend - clstart
1836 for c in xrange(clstart, clend):
1836 for c in xrange(clstart, clend):
1837 efiles.update(self[c].files())
1837 efiles.update(self[c].files())
1838 efiles = len(efiles)
1838 efiles = len(efiles)
1839 self.ui.progress(_('changesets'), None)
1839 self.ui.progress(_('changesets'), None)
1840
1840
1841 # pull off the manifest group
1841 # pull off the manifest group
1842 self.ui.status(_("adding manifests\n"))
1842 self.ui.status(_("adding manifests\n"))
1843 pr.step = _('manifests')
1843 pr.step = _('manifests')
1844 pr.count = 1
1844 pr.count = 1
1845 pr.total = changesets # manifests <= changesets
1845 pr.total = changesets # manifests <= changesets
1846 # no need to check for empty manifest group here:
1846 # no need to check for empty manifest group here:
1847 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1847 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1848 # no new manifest will be created and the manifest group will
1848 # no new manifest will be created and the manifest group will
1849 # be empty during the pull
1849 # be empty during the pull
1850 source.manifestheader()
1850 source.manifestheader()
1851 self.manifest.addgroup(source, revmap, trp)
1851 self.manifest.addgroup(source, revmap, trp)
1852 self.ui.progress(_('manifests'), None)
1852 self.ui.progress(_('manifests'), None)
1853
1853
1854 needfiles = {}
1854 needfiles = {}
1855 if self.ui.configbool('server', 'validate', default=False):
1855 if self.ui.configbool('server', 'validate', default=False):
1856 # validate incoming csets have their manifests
1856 # validate incoming csets have their manifests
1857 for cset in xrange(clstart, clend):
1857 for cset in xrange(clstart, clend):
1858 mfest = self.changelog.read(self.changelog.node(cset))[0]
1858 mfest = self.changelog.read(self.changelog.node(cset))[0]
1859 mfest = self.manifest.readdelta(mfest)
1859 mfest = self.manifest.readdelta(mfest)
1860 # store file nodes we must see
1860 # store file nodes we must see
1861 for f, n in mfest.iteritems():
1861 for f, n in mfest.iteritems():
1862 needfiles.setdefault(f, set()).add(n)
1862 needfiles.setdefault(f, set()).add(n)
1863
1863
1864 # process the files
1864 # process the files
1865 self.ui.status(_("adding file changes\n"))
1865 self.ui.status(_("adding file changes\n"))
1866 pr.step = _('files')
1866 pr.step = _('files')
1867 pr.count = 1
1867 pr.count = 1
1868 pr.total = efiles
1868 pr.total = efiles
1869 source.callback = None
1869 source.callback = None
1870
1870
1871 while True:
1871 while True:
1872 chunkdata = source.filelogheader()
1872 chunkdata = source.filelogheader()
1873 if not chunkdata:
1873 if not chunkdata:
1874 break
1874 break
1875 f = chunkdata["filename"]
1875 f = chunkdata["filename"]
1876 self.ui.debug("adding %s revisions\n" % f)
1876 self.ui.debug("adding %s revisions\n" % f)
1877 pr()
1877 pr()
1878 fl = self.file(f)
1878 fl = self.file(f)
1879 o = len(fl)
1879 o = len(fl)
1880 if fl.addgroup(source, revmap, trp) is None:
1880 if fl.addgroup(source, revmap, trp) is None:
1881 raise util.Abort(_("received file revlog group is empty"))
1881 raise util.Abort(_("received file revlog group is empty"))
1882 revisions += len(fl) - o
1882 revisions += len(fl) - o
1883 files += 1
1883 files += 1
1884 if f in needfiles:
1884 if f in needfiles:
1885 needs = needfiles[f]
1885 needs = needfiles[f]
1886 for new in xrange(o, len(fl)):
1886 for new in xrange(o, len(fl)):
1887 n = fl.node(new)
1887 n = fl.node(new)
1888 if n in needs:
1888 if n in needs:
1889 needs.remove(n)
1889 needs.remove(n)
1890 if not needs:
1890 if not needs:
1891 del needfiles[f]
1891 del needfiles[f]
1892 self.ui.progress(_('files'), None)
1892 self.ui.progress(_('files'), None)
1893
1893
1894 for f, needs in needfiles.iteritems():
1894 for f, needs in needfiles.iteritems():
1895 fl = self.file(f)
1895 fl = self.file(f)
1896 for n in needs:
1896 for n in needs:
1897 try:
1897 try:
1898 fl.rev(n)
1898 fl.rev(n)
1899 except error.LookupError:
1899 except error.LookupError:
1900 raise util.Abort(
1900 raise util.Abort(
1901 _('missing file data for %s:%s - run hg verify') %
1901 _('missing file data for %s:%s - run hg verify') %
1902 (f, hex(n)))
1902 (f, hex(n)))
1903
1903
1904 dh = 0
1904 dh = 0
1905 if oldheads:
1905 if oldheads:
1906 heads = cl.heads()
1906 heads = cl.heads()
1907 dh = len(heads) - len(oldheads)
1907 dh = len(heads) - len(oldheads)
1908 for h in heads:
1908 for h in heads:
1909 if h not in oldheads and 'close' in self[h].extra():
1909 if h not in oldheads and 'close' in self[h].extra():
1910 dh -= 1
1910 dh -= 1
1911 htext = ""
1911 htext = ""
1912 if dh:
1912 if dh:
1913 htext = _(" (%+d heads)") % dh
1913 htext = _(" (%+d heads)") % dh
1914
1914
1915 self.ui.status(_("added %d changesets"
1915 self.ui.status(_("added %d changesets"
1916 " with %d changes to %d files%s\n")
1916 " with %d changes to %d files%s\n")
1917 % (changesets, revisions, files, htext))
1917 % (changesets, revisions, files, htext))
1918
1918
1919 if changesets > 0:
1919 if changesets > 0:
1920 p = lambda: cl.writepending() and self.root or ""
1920 p = lambda: cl.writepending() and self.root or ""
1921 self.hook('pretxnchangegroup', throw=True,
1921 self.hook('pretxnchangegroup', throw=True,
1922 node=hex(cl.node(clstart)), source=srctype,
1922 node=hex(cl.node(clstart)), source=srctype,
1923 url=url, pending=p)
1923 url=url, pending=p)
1924
1924
1925 # make changelog see real files again
1925 # make changelog see real files again
1926 cl.finalize(trp)
1926 cl.finalize(trp)
1927
1927
1928 tr.close()
1928 tr.close()
1929 finally:
1929 finally:
1930 tr.release()
1930 tr.release()
1931 if lock:
1931 if lock:
1932 lock.release()
1932 lock.release()
1933
1933
1934 if changesets > 0:
1934 if changesets > 0:
1935 # forcefully update the on-disk branch cache
1935 # forcefully update the on-disk branch cache
1936 self.ui.debug("updating the branch cache\n")
1936 self.ui.debug("updating the branch cache\n")
1937 self.updatebranchcache()
1937 self.updatebranchcache()
1938 self.hook("changegroup", node=hex(cl.node(clstart)),
1938 self.hook("changegroup", node=hex(cl.node(clstart)),
1939 source=srctype, url=url)
1939 source=srctype, url=url)
1940
1940
1941 for i in xrange(clstart, clend):
1941 for i in xrange(clstart, clend):
1942 self.hook("incoming", node=hex(cl.node(i)),
1942 self.hook("incoming", node=hex(cl.node(i)),
1943 source=srctype, url=url)
1943 source=srctype, url=url)
1944
1944
1945 # never return 0 here:
1945 # never return 0 here:
1946 if dh < 0:
1946 if dh < 0:
1947 return dh - 1
1947 return dh - 1
1948 else:
1948 else:
1949 return dh + 1
1949 return dh + 1
1950
1950
1951 def stream_in(self, remote, requirements):
1951 def stream_in(self, remote, requirements):
1952 lock = self.lock()
1952 lock = self.lock()
1953 try:
1953 try:
1954 fp = remote.stream_out()
1954 fp = remote.stream_out()
1955 l = fp.readline()
1955 l = fp.readline()
1956 try:
1956 try:
1957 resp = int(l)
1957 resp = int(l)
1958 except ValueError:
1958 except ValueError:
1959 raise error.ResponseError(
1959 raise error.ResponseError(
1960 _('Unexpected response from remote server:'), l)
1960 _('Unexpected response from remote server:'), l)
1961 if resp == 1:
1961 if resp == 1:
1962 raise util.Abort(_('operation forbidden by server'))
1962 raise util.Abort(_('operation forbidden by server'))
1963 elif resp == 2:
1963 elif resp == 2:
1964 raise util.Abort(_('locking the remote repository failed'))
1964 raise util.Abort(_('locking the remote repository failed'))
1965 elif resp != 0:
1965 elif resp != 0:
1966 raise util.Abort(_('the server sent an unknown error code'))
1966 raise util.Abort(_('the server sent an unknown error code'))
1967 self.ui.status(_('streaming all changes\n'))
1967 self.ui.status(_('streaming all changes\n'))
1968 l = fp.readline()
1968 l = fp.readline()
1969 try:
1969 try:
1970 total_files, total_bytes = map(int, l.split(' ', 1))
1970 total_files, total_bytes = map(int, l.split(' ', 1))
1971 except (ValueError, TypeError):
1971 except (ValueError, TypeError):
1972 raise error.ResponseError(
1972 raise error.ResponseError(
1973 _('Unexpected response from remote server:'), l)
1973 _('Unexpected response from remote server:'), l)
1974 self.ui.status(_('%d files to transfer, %s of data\n') %
1974 self.ui.status(_('%d files to transfer, %s of data\n') %
1975 (total_files, util.bytecount(total_bytes)))
1975 (total_files, util.bytecount(total_bytes)))
1976 start = time.time()
1976 start = time.time()
1977 for i in xrange(total_files):
1977 for i in xrange(total_files):
1978 # XXX doesn't support '\n' or '\r' in filenames
1978 # XXX doesn't support '\n' or '\r' in filenames
1979 l = fp.readline()
1979 l = fp.readline()
1980 try:
1980 try:
1981 name, size = l.split('\0', 1)
1981 name, size = l.split('\0', 1)
1982 size = int(size)
1982 size = int(size)
1983 except (ValueError, TypeError):
1983 except (ValueError, TypeError):
1984 raise error.ResponseError(
1984 raise error.ResponseError(
1985 _('Unexpected response from remote server:'), l)
1985 _('Unexpected response from remote server:'), l)
1986 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1986 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1987 # for backwards compat, name was partially encoded
1987 # for backwards compat, name was partially encoded
1988 ofp = self.sopener(store.decodedir(name), 'w')
1988 ofp = self.sopener(store.decodedir(name), 'w')
1989 for chunk in util.filechunkiter(fp, limit=size):
1989 for chunk in util.filechunkiter(fp, limit=size):
1990 ofp.write(chunk)
1990 ofp.write(chunk)
1991 ofp.close()
1991 ofp.close()
1992 elapsed = time.time() - start
1992 elapsed = time.time() - start
1993 if elapsed <= 0:
1993 if elapsed <= 0:
1994 elapsed = 0.001
1994 elapsed = 0.001
1995 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1995 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1996 (util.bytecount(total_bytes), elapsed,
1996 (util.bytecount(total_bytes), elapsed,
1997 util.bytecount(total_bytes / elapsed)))
1997 util.bytecount(total_bytes / elapsed)))
1998
1998
1999 # new requirements = old non-format requirements + new format-related
1999 # new requirements = old non-format requirements + new format-related
2000 # requirements from the streamed-in repository
2000 # requirements from the streamed-in repository
2001 requirements.update(set(self.requirements) - self.supportedformats)
2001 requirements.update(set(self.requirements) - self.supportedformats)
2002 self._applyrequirements(requirements)
2002 self._applyrequirements(requirements)
2003 self._writerequirements()
2003 self._writerequirements()
2004
2004
2005 self.invalidate()
2005 self.invalidate()
2006 return len(self.heads()) + 1
2006 return len(self.heads()) + 1
2007 finally:
2007 finally:
2008 lock.release()
2008 lock.release()
2009
2009
2010 def clone(self, remote, heads=[], stream=False):
2010 def clone(self, remote, heads=[], stream=False):
2011 '''clone remote repository.
2011 '''clone remote repository.
2012
2012
2013 keyword arguments:
2013 keyword arguments:
2014 heads: list of revs to clone (forces use of pull)
2014 heads: list of revs to clone (forces use of pull)
2015 stream: use streaming clone if possible'''
2015 stream: use streaming clone if possible'''
2016
2016
2017 # now, all clients that can request uncompressed clones can
2017 # now, all clients that can request uncompressed clones can
2018 # read repo formats supported by all servers that can serve
2018 # read repo formats supported by all servers that can serve
2019 # them.
2019 # them.
2020
2020
2021 # if revlog format changes, client will have to check version
2021 # if revlog format changes, client will have to check version
2022 # and format flags on "stream" capability, and use
2022 # and format flags on "stream" capability, and use
2023 # uncompressed only if compatible.
2023 # uncompressed only if compatible.
2024
2024
2025 if stream and not heads:
2025 if stream and not heads:
2026 # 'stream' means remote revlog format is revlogv1 only
2026 # 'stream' means remote revlog format is revlogv1 only
2027 if remote.capable('stream'):
2027 if remote.capable('stream'):
2028 return self.stream_in(remote, set(('revlogv1',)))
2028 return self.stream_in(remote, set(('revlogv1',)))
2029 # otherwise, 'streamreqs' contains the remote revlog format
2029 # otherwise, 'streamreqs' contains the remote revlog format
2030 streamreqs = remote.capable('streamreqs')
2030 streamreqs = remote.capable('streamreqs')
2031 if streamreqs:
2031 if streamreqs:
2032 streamreqs = set(streamreqs.split(','))
2032 streamreqs = set(streamreqs.split(','))
2033 # if we support it, stream in and adjust our requirements
2033 # if we support it, stream in and adjust our requirements
2034 if not streamreqs - self.supportedformats:
2034 if not streamreqs - self.supportedformats:
2035 return self.stream_in(remote, streamreqs)
2035 return self.stream_in(remote, streamreqs)
2036 return self.pull(remote, heads)
2036 return self.pull(remote, heads)
2037
2037
2038 def pushkey(self, namespace, key, old, new):
2038 def pushkey(self, namespace, key, old, new):
2039 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2039 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2040 old=old, new=new)
2040 old=old, new=new)
2041 ret = pushkey.push(self, namespace, key, old, new)
2041 ret = pushkey.push(self, namespace, key, old, new)
2042 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2042 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2043 ret=ret)
2043 ret=ret)
2044 return ret
2044 return ret
2045
2045
2046 def listkeys(self, namespace):
2046 def listkeys(self, namespace):
2047 self.hook('prelistkeys', throw=True, namespace=namespace)
2047 self.hook('prelistkeys', throw=True, namespace=namespace)
2048 values = pushkey.list(self, namespace)
2048 values = pushkey.list(self, namespace)
2049 self.hook('listkeys', namespace=namespace, values=values)
2049 self.hook('listkeys', namespace=namespace, values=values)
2050 return values
2050 return values
2051
2051
2052 def debugwireargs(self, one, two, three=None, four=None, five=None):
2052 def debugwireargs(self, one, two, three=None, four=None, five=None):
2053 '''used to test argument passing over the wire'''
2053 '''used to test argument passing over the wire'''
2054 return "%s %s %s %s %s" % (one, two, three, four, five)
2054 return "%s %s %s %s %s" % (one, two, three, four, five)
2055
2055
2056 def savecommitmessage(self, text):
2056 def savecommitmessage(self, text):
2057 fp = self.opener('last-message.txt', 'wb')
2057 fp = self.opener('last-message.txt', 'wb')
2058 try:
2058 try:
2059 fp.write(text)
2059 fp.write(text)
2060 finally:
2060 finally:
2061 fp.close()
2061 fp.close()
2062 return self.pathto(fp.name[len(self.root)+1:])
2062 return self.pathto(fp.name[len(self.root)+1:])
2063
2063
2064 # used to avoid circular references so destructors work
2064 # used to avoid circular references so destructors work
2065 def aftertrans(files):
2065 def aftertrans(files):
2066 renamefiles = [tuple(t) for t in files]
2066 renamefiles = [tuple(t) for t in files]
2067 def a():
2067 def a():
2068 for src, dest in renamefiles:
2068 for src, dest in renamefiles:
2069 util.rename(src, dest)
2069 util.rename(src, dest)
2070 return a
2070 return a
2071
2071
2072 def undoname(fn):
2072 def undoname(fn):
2073 base, name = os.path.split(fn)
2073 base, name = os.path.split(fn)
2074 assert name.startswith('journal')
2074 assert name.startswith('journal')
2075 return os.path.join(base, name.replace('journal', 'undo', 1))
2075 return os.path.join(base, name.replace('journal', 'undo', 1))
2076
2076
2077 def instance(ui, path, create):
2077 def instance(ui, path, create):
2078 return localrepository(ui, util.urllocalpath(path), create)
2078 return localrepository(ui, util.urllocalpath(path), create)
2079
2079
2080 def islocal(path):
2080 def islocal(path):
2081 return True
2081 return True
@@ -1,182 +1,183 b''
1 setup repo
1 setup repo
2 $ hg init t
2 $ hg init t
3 $ cd t
3 $ cd t
4 $ echo a > a
4 $ echo a > a
5 $ hg commit -Am'add a'
5 $ hg commit -Am'add a'
6 adding a
6 adding a
7 $ hg verify
7 $ hg verify
8 checking changesets
8 checking changesets
9 checking manifests
9 checking manifests
10 crosschecking files in changesets and manifests
10 crosschecking files in changesets and manifests
11 checking files
11 checking files
12 1 files, 1 changesets, 1 total revisions
12 1 files, 1 changesets, 1 total revisions
13 $ hg parents
13 $ hg parents
14 changeset: 0:1f0dee641bb7
14 changeset: 0:1f0dee641bb7
15 tag: tip
15 tag: tip
16 user: test
16 user: test
17 date: Thu Jan 01 00:00:00 1970 +0000
17 date: Thu Jan 01 00:00:00 1970 +0000
18 summary: add a
18 summary: add a
19
19
20
20
21 rollback to null revision
21 rollback to null revision
22 $ hg status
22 $ hg status
23 $ hg rollback
23 $ hg rollback
24 repository tip rolled back to revision -1 (undo commit)
24 repository tip rolled back to revision -1 (undo commit)
25 working directory now based on revision -1
25 working directory now based on revision -1
26 $ hg verify
26 $ hg verify
27 checking changesets
27 checking changesets
28 checking manifests
28 checking manifests
29 crosschecking files in changesets and manifests
29 crosschecking files in changesets and manifests
30 checking files
30 checking files
31 0 files, 0 changesets, 0 total revisions
31 0 files, 0 changesets, 0 total revisions
32 $ hg parents
32 $ hg parents
33 $ hg status
33 $ hg status
34 A a
34 A a
35
35
36 Two changesets this time so we rollback to a real changeset
36 Two changesets this time so we rollback to a real changeset
37 $ hg commit -m'add a again'
37 $ hg commit -m'add a again'
38 $ echo a >> a
38 $ echo a >> a
39 $ hg commit -m'modify a'
39 $ hg commit -m'modify a'
40
40
41 Test issue 902 (current branch is preserved)
41 Test issue 902 (current branch is preserved)
42 $ hg branch test
42 $ hg branch test
43 marked working directory as branch test
43 marked working directory as branch test
44 $ hg rollback
44 $ hg rollback
45 repository tip rolled back to revision 0 (undo commit)
45 repository tip rolled back to revision 0 (undo commit)
46 working directory now based on revision 0
46 working directory now based on revision 0
47 $ hg branch
47 $ hg branch
48 default
48 default
49
49
50 Test issue 1635 (commit message saved)
50 Test issue 1635 (commit message saved)
51 $ cat .hg/last-message.txt ; echo
51 $ cat .hg/last-message.txt ; echo
52 modify a
52 modify a
53
53
54 Test rollback of hg before issue 902 was fixed
54 Test rollback of hg before issue 902 was fixed
55
55
56 $ hg commit -m "test3"
56 $ hg commit -m "test3"
57 $ hg branch test
57 $ hg branch test
58 marked working directory as branch test
58 marked working directory as branch test
59 $ rm .hg/undo.branch
59 $ rm .hg/undo.branch
60 $ hg rollback
60 $ hg rollback
61 repository tip rolled back to revision 0 (undo commit)
61 repository tip rolled back to revision 0 (undo commit)
62 named branch could not be reset: current branch is still 'test'
62 named branch could not be reset: current branch is still 'test'
63 working directory now based on revision 0
63 working directory now based on revision 0
64 $ hg branch
64 $ hg branch
65 test
65 test
66
66
67 working dir unaffected by rollback: do not restore dirstate et. al.
67 working dir unaffected by rollback: do not restore dirstate et. al.
68 $ hg log --template '{rev} {branch} {desc|firstline}\n'
68 $ hg log --template '{rev} {branch} {desc|firstline}\n'
69 0 default add a again
69 0 default add a again
70 $ hg status
70 $ hg status
71 M a
71 M a
72 $ hg bookmark foo
72 $ hg bookmark foo
73 $ hg commit -m'modify a again'
73 $ hg commit -m'modify a again'
74 $ echo b > b
74 $ echo b > b
75 $ hg commit -Am'add b'
75 $ hg commit -Am'add b'
76 adding b
76 adding b
77 $ hg log --template '{rev} {branch} {desc|firstline}\n'
77 $ hg log --template '{rev} {branch} {desc|firstline}\n'
78 2 test add b
78 2 test add b
79 1 test modify a again
79 1 test modify a again
80 0 default add a again
80 0 default add a again
81 $ hg update default
81 $ hg update default
82 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
82 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
83 $ hg bookmark bar
83 $ hg bookmark bar
84 $ cat .hg/undo.branch ; echo
84 $ cat .hg/undo.branch ; echo
85 test
85 test
86 $ hg rollback -f
86 $ hg rollback -f
87 repository tip rolled back to revision 1 (undo commit)
87 repository tip rolled back to revision 1 (undo commit)
88 $ hg id -n
88 $ hg id -n
89 0
89 0
90 $ hg branch
90 $ hg branch
91 default
91 default
92 $ cat .hg/bookmarks.current ; echo
92 $ cat .hg/bookmarks.current ; echo
93 bar
93 bar
94 $ hg bookmark --delete foo
94 $ hg bookmark --delete foo
95
95
96 rollback by pretxncommit saves commit message (issue 1635)
96 rollback by pretxncommit saves commit message (issue 1635)
97
97
98 $ echo a >> a
98 $ echo a >> a
99 $ hg --config hooks.pretxncommit=false commit -m"precious commit message"
99 $ hg --config hooks.pretxncommit=false commit -m"precious commit message"
100 transaction abort!
100 transaction abort!
101 rollback completed
101 rollback completed
102 abort: pretxncommit hook exited with status * (glob)
102 abort: pretxncommit hook exited with status * (glob)
103 [255]
103 [255]
104 $ cat .hg/last-message.txt ; echo
104 $ cat .hg/last-message.txt ; echo
105 precious commit message
105 precious commit message
106
106
107 same thing, but run $EDITOR
107 same thing, but run $EDITOR
108
108
109 $ cat > editor << '__EOF__'
109 $ cat > editor << '__EOF__'
110 > #!/bin/sh
110 > #!/bin/sh
111 > echo "another precious commit message" > "$1"
111 > echo "another precious commit message" > "$1"
112 > __EOF__
112 > __EOF__
113 $ chmod +x editor
113 $ chmod +x editor
114 $ HGEDITOR="'`pwd`'"/editor hg --config hooks.pretxncommit=false commit 2>&1
114 $ HGEDITOR="'`pwd`'"/editor hg --config hooks.pretxncommit=false commit 2>&1
115 transaction abort!
115 transaction abort!
116 rollback completed
116 rollback completed
117 note: commit message saved in .hg/last-message.txt
117 note: commit message saved in .hg/last-message.txt
118 abort: pretxncommit hook exited with status * (glob)
118 abort: pretxncommit hook exited with status * (glob)
119 [255]
119 [255]
120 $ cat .hg/last-message.txt
120 $ cat .hg/last-message.txt
121 another precious commit message
121 another precious commit message
122
122
123 test rollback on served repository
123 test rollback on served repository
124
124
125 $ hg commit -m "precious commit message"
125 $ hg commit -m "precious commit message"
126 $ hg serve -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
126 $ hg serve -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
127 $ cat hg.pid >> $DAEMON_PIDS
127 $ cat hg.pid >> $DAEMON_PIDS
128 $ cd ..
128 $ cd ..
129 $ hg clone http://localhost:$HGPORT u
129 $ hg clone http://localhost:$HGPORT u
130 requesting all changes
130 requesting all changes
131 adding changesets
131 adding changesets
132 adding manifests
132 adding manifests
133 adding file changes
133 adding file changes
134 added 3 changesets with 2 changes to 1 files (+1 heads)
134 added 3 changesets with 2 changes to 1 files (+1 heads)
135 updating to branch default
135 updating to branch default
136 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
136 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
137 $ cd u
137 $ cd u
138 $ hg id default
138 $ hg id default
139 068774709090
139 068774709090
140
140
141 now rollback and observe that 'hg serve' reloads the repository and
141 now rollback and observe that 'hg serve' reloads the repository and
142 presents the correct tip changeset:
142 presents the correct tip changeset:
143
143
144 $ hg -R ../t rollback
144 $ hg -R ../t rollback
145 repository tip rolled back to revision 1 (undo commit)
145 repository tip rolled back to revision 1 (undo commit)
146 working directory now based on revision 0
146 working directory now based on revision 0
147 $ hg id default
147 $ hg id default
148 791dd2169706
148 791dd2169706
149
149
150 update to older changeset and then refuse rollback, because
150 update to older changeset and then refuse rollback, because
151 that would lose data (issue2998)
151 that would lose data (issue2998)
152 $ cd ../t
152 $ cd ../t
153 $ hg -q update
153 $ hg -q update
154 $ rm `hg status -un`
154 $ rm `hg status -un`
155 $ template='{rev}:{node|short} [{branch}] {desc|firstline}\n'
155 $ template='{rev}:{node|short} [{branch}] {desc|firstline}\n'
156 $ echo 'valuable new file' > b
156 $ echo 'valuable new file' > b
157 $ echo 'valuable modification' >> a
157 $ echo 'valuable modification' >> a
158 $ hg commit -A -m'a valuable change'
158 $ hg commit -A -m'a valuable change'
159 adding b
159 adding b
160 $ hg update 0
160 $ hg update 0
161 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
161 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
162 $ hg rollback
162 $ hg rollback
163 abort: rollback of last commit while not checked out may lose data (use -f to force)
163 abort: rollback of last commit while not checked out may lose data
164 (use -f to force)
164 [255]
165 [255]
165 $ hg tip -q
166 $ hg tip -q
166 2:4d9cd3795eea
167 2:4d9cd3795eea
167 $ hg rollback -f
168 $ hg rollback -f
168 repository tip rolled back to revision 1 (undo commit)
169 repository tip rolled back to revision 1 (undo commit)
169 $ hg status
170 $ hg status
170 $ hg log --removed b # yep, it's gone
171 $ hg log --removed b # yep, it's gone
171
172
172 same again, but emulate an old client that doesn't write undo.desc
173 same again, but emulate an old client that doesn't write undo.desc
173 $ hg -q update
174 $ hg -q update
174 $ echo 'valuable modification redux' >> a
175 $ echo 'valuable modification redux' >> a
175 $ hg commit -m'a valuable change redux'
176 $ hg commit -m'a valuable change redux'
176 $ rm .hg/undo.desc
177 $ rm .hg/undo.desc
177 $ hg update 0
178 $ hg update 0
178 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
179 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
179 $ hg rollback
180 $ hg rollback
180 rolling back unknown transaction
181 rolling back unknown transaction
181 $ cat a
182 $ cat a
182 a
183 a
General Comments 0
You need to be logged in to leave comments. Login now