windows: sanity-check symlink placeholders...
Matt Mackall
r15348:c681e478 stable
@@ -1,2085 +1,2101 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}
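        # Illustrative note (not part of the original source): a
        # filecache-decorated property such as _bookmarks below is computed
        # on first access, and the stat of its backing file is recorded in
        # this dict so later accesses can detect on-disk changes and
        # recompute the value.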

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()
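    # Sketch of the resulting .hg/requires for a repository created with
    # the defaults in __init__ above, one requirement per line:
    #
    #   revlogv1
    #   store
    #   fncache
    #   dotencode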

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
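    # Worked example: with a subrepository checked out at "sub",
    # _checknested(root + "/sub") returns True, and a deeper path such as
    # root + "/sub/lib" is delegated to sub.checknested("lib").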

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    @filecache('00changelog.i', True)
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @filecache('00manifest.i', True)
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''

        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        for r in m(self, range(len(self))):
            yield self[r]
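    # Example use (illustrative; revset.formatspec handles quoting of the
    # substituted arguments):
    #
    #   for ctx in repo.set('heads(branch(%s))', branchname):
    #       ...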

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
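    # Illustrative call: create a local (uncommitted) tag on the working
    # directory's parent:
    #
    #   repo.tag('snapshot', repo['.'].node(), 'message', True, None, None)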

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        return self._tagscache.tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self.tags().iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt
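    # Shape sketch: branchmap() yields {branchname: [binary head nodes,
    # tipmost last]}, while branchtags() reduces each list to a single
    # head, preferring the tipmost head that is not closed.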

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
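    # Resolution order above: integer revision, '.', 'null', 'tip', full
    # node, bookmark, tag, branch name, then unambiguous hex prefix;
    # anything else raises RepoLookupError.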

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        return [(n in nm) for n in nodes]

    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)
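    # wread() runs working-directory data through the [encode] filters
    # configured in hgrc; wwrite() below applies the [decode] filters,
    # i.e. the reverse direction.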

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)
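        # Flags sketch: 'l' stores data as a symlink target (the opener
        # writes a plain placeholder file on platforms without symlink
        # support); 'x' marks the written file executable.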

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
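    # Typical use (sketch, assuming the caller already holds the store
    # lock):
    #
    #   tr = repo.transaction('commit')
    #   try:
    #       # ... append to revlogs ...
    #       tr.close()
    #   finally:
    #       tr.release()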

    def _writejournal(self, desc):
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            self.destroyed()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        return 0

    def invalidatecaches(self):
        try:
            delattr(self, '_tagscache')
        except AttributeError:
            pass

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        try:
            delattr(self, 'dirstate')
        except AttributeError:
            pass

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
924
924
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
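        # Note (added): the node returned here ends up in the manifest; a
        # new filelog revision is only created (above) when content,
        # parents or copy metadata actually changed.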
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
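            # Note (added): status() returns a 7-tuple ordered as
            # (modified, added, removed, deleted, unknown, ignored, clean),
            # hence the changes[0]..changes[6] indexing below.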
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

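    # Note (added): hook ordering around a commit is precommit ->
    # pretxncommit (fired inside commitctx() while the transaction is
    # still open) -> commit (fired after the wlock has been released).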
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
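                # Note (added): len(self) is the revision number the new
                # changeset will receive; the filelog and manifest entries
                # added below record it as their linkrev.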
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

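        # Note (added): on filesystems without symlink support, checked-out
        # symlinks are small one-line placeholder files holding the link
        # target, so anything large, multi-line or binary cannot be a valid
        # placeholder and was dropped from `modified` above.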
        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
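    # Note (added): for each starting node, branches() walks first parents
    # until it hits a merge or the root and reports (start, stop, p1, p2);
    # it backs the old 'branches' wire-protocol command used by
    # pre-getbundle discovery.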

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
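    # Note (added): between() samples each first-parent chain at
    # exponentially growing distances (1, 2, 4, ...), letting old-style
    # discovery binary-search for common ancestors in few round trips.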

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
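                # Note (added): the capability checks above form a fallback
                # chain -- prefer getbundle, fall back to a full changegroup
                # when pulling everything, and use changegroupsubset only
                # for partial pulls from pre-getbundle servers.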
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push.  once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)
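        # Note (added): a bookmark is only pushed forward when the local
        # node descends from the remote one; diverged bookmarks are left
        # untouched rather than overwritten, and only a rejected pushkey
        # call is reported as a failure.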

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)
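        # Note (added): when the requested heads are exactly the repository
        # heads, the receiver wants everything reachable and the cheaper
        # _changegroup() above suffices; otherwise fall through to the slow
        # path, which prunes manifest and file nodes against commonrevs.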

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]
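        # Note (added): the single lookup() callback serves three phases --
        # changelog chunks (collecting changed files and needed manifest
        # nodes), manifest chunks (mapping file nodes to their owning
        # changesets), and file chunks (resolving linkrevs from fstate).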

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't.  Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])
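        # Note (added): since the recipient lacks exactly these changesets,
        # every manifest and file revision whose linkrev falls in this set
        # must be sent; gennodelst() below selects them for each revlog.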

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'.  url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()
1815
1831
1816 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1832 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1817 try:
1833 try:
1818 trp = weakref.proxy(tr)
1834 trp = weakref.proxy(tr)
1819 # pull off the changeset group
1835 # pull off the changeset group
1820 self.ui.status(_("adding changesets\n"))
1836 self.ui.status(_("adding changesets\n"))
1821 clstart = len(cl)
1837 clstart = len(cl)
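            # progress callback: the bundle source invokes this once per
            # chunk it reads, so the counter below drives the progress bar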
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

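            # compare the head count before and after the transaction;
            # e.g. pulling one brand-new head yields dh == 1, reported
            # below as "(+1 heads)", while heads whose changesets carry
            # the 'close' extra (closed branch heads) are excluded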
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
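        # The stream_out wire format, as parsed below: a status line (an
        # integer; 0 is success, 1 means the server forbids the operation,
        # 2 means it failed to lock the remote repository), then a line
        # with "<filecount> <bytecount>", then for each file a header of
        # "<name>\0<size>" followed by exactly <size> bytes of raw data.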
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            # new format-related requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
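        # Typical use: repo.clone(remote, stream=True) attempts an
        # uncompressed streaming clone if the server advertises a
        # compatible format, and otherwise falls back to a normal pull.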

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
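        # pushkey is a generic key/value protocol between repositories;
        # namespaces such as 'bookmarks' are exchanged through it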
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
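        # persist the message in .hg/last-message.txt so it can be
        # recovered later (e.g. after a failed commit), and report
        # where it was written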
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
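    # copying into plain tuples up front means the returned closure
    # holds no reference back to the transaction that created it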
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def undoname(fn):
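    # map a journal file to its undo counterpart, e.g.
    # '.hg/journal.dirstate' -> '.hg/undo.dirstate'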
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True