windows: use normalized path to check repository nesting...
FUJIWARA Katsunori
r15722:417127af stable

@@ -1,2101 +1,2102 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
+        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
-            prefix = os.sep.join(parts)
+            prefix = '/'.join(parts)
            if prefix in ctx.substate:
-                if prefix == subpath:
+                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

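# --- illustrative aside (not part of this changeset) ----------------------
# A minimal, self-contained sketch of the failure the hunk above fixes. It
# assumes, as on Windows, that os.sep is '\\', that util.splitpath splits on
# os.sep, and that util.pconvert rewrites os.sep to '/'; substate keys, by
# contrast, always use '/' regardless of platform. The names below are
# stand-ins, not Mercurial's own objects.

SEP = '\\'                                # os.sep on Windows

def pconvert(path):
    return path.replace(SEP, '/')         # stand-in for util.pconvert

def splitpath(path):
    return path.split(SEP)                # stand-in for util.splitpath

substate = {'sub/nested': ('../nested', 40 * '0', 'hg')}   # hypothetical entry

subpath = 'sub' + SEP + 'nested'          # native Windows spelling of the path
parts = splitpath(subpath)                # ['sub', 'nested']

oldprefix = SEP.join(parts)               # 'sub\\nested' - never a substate key
newprefix = '/'.join(parts)               # 'sub/nested'  - matches

assert oldprefix not in substate          # old code fell through, returned False
assert newprefix in substate              # new code finds the nested repo
assert newprefix == pconvert(subpath)     # and the normsubpath equality holds
# ---------------------------------------------------------------------------
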
    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    @filecache('00changelog.i', True)
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @filecache('00manifest.i', True)
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''

        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        for r in m(self, range(len(self))):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        return self._tagscache.tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self.tags().iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        return [(n in nm) for n in nodes]

    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def _writejournal(self, desc):
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        self.destroyed()
        return 0

    def invalidatecaches(self):
        try:
            delattr(self, '_tagscache')
        except AttributeError:
            pass

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        try:
            delattr(self, 'dirstate')
        except AttributeError:
            pass

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

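# --- illustrative aside (not part of this changeset) ----------------------
# A sketch of the calling convention the lock()/wlock() docstrings above
# describe: take wlock before lock, take lock before opening a transaction,
# and release in reverse order (rollback() earlier in this file follows the
# same pattern). 'repo' is a hypothetical localrepository instance; release
# is mercurial.lock.release, imported at the top of this file, and skips
# None values.

def locked_operation(repo):
    wlock = lock = tr = None
    try:
        wlock = repo.wlock()             # guards .hg outside the store
        lock = repo.lock()               # guards .hg/store
        tr = repo.transaction('example')
        # ... modify the store here ...
        tr.close()                       # commit the journal
    finally:
        if tr is not None:
            tr.release()                 # aborts the transaction if not closed
        release(lock, wlock)             # reverse of acquisition order
# ---------------------------------------------------------------------------
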
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

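    # Sketch of the copy metadata written above: after 'hg copy foo bar'
    # (hypothetical file names), the new filelog revision of 'bar' carries
    #
    #   meta = {'copy': 'foo', 'copyrev': hex(crev)}
    #
    # and fparent1 is reset to nullid, which tells readers to look up the
    # copy source instead of following the usual first parent.
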
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

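    # A minimal call sketch, assuming 'repo' is a localrepository with
    # pending working directory changes (user and message are hypothetical):
    #
    #   node = repo.commit(text="fix parser", user="alice <alice@example.org>")
    #   if node is None:
    #       pass  # nothing changed, no commit was created
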
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

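    # commitctx() also serves in-memory commits. A sketch, assuming
    # context.memctx/memfilectx keep their usual signatures (the file name
    # and content are hypothetical):
    #
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(path, 'hello\n')
    #   mctx = context.memctx(repo, (repo['.'].node(), None), 'add hello',
    #                         ['hello.txt'], getfilectx, user='alice')
    #   node = repo.commitctx(mctx)
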
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

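    # The result is always a 7-tuple of sorted lists in this order (sketch,
    # assuming 'repo' is a localrepository):
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, ignored=True, clean=True)
    #
    # unknown, ignored and clean stay empty unless requested via the
    # corresponding keyword arguments.
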
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

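    # Sketch: print the open heads of the 'default' branch, newest first:
    #
    #   for h in repo.branchheads('default'):
    #       repo.ui.write('%s\n' % hex(h))
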
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # follow first parents until a merge or root is reached and
            # record the linear branch as (tip, base, p1-of-base, p2-of-base)
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # walk first parents from top towards bottom, keeping every
            # node whose distance from top is a power of two (1, 2, 4, ...)
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

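    # Pull sketch, assuming the peer is opened with hg.repository() (the
    # URL is hypothetical):
    #
    #   from mercurial import hg
    #   other = hg.repository(repo.ui, 'http://example.org/repo')
    #   repo.pull(other)
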
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

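    # Interpreting the return value (sketch; 'other' opened as in the pull
    # example above):
    #
    #   ret = repo.push(other)
    #   # ret == 0: HTTP error or nothing to push
    #   # ret == 1: pushed with remote head count unchanged, or push refused
    #   # ret > 1 or ret < 0: head count change, see addchangegroup() below
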
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

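    # Sketch: bundle everything the local repository has, from scratch:
    #
    #   cg = repo.getbundle('pull', heads=None, common=None)
    #   if cg is None:
    #       pass  # nothing is missing relative to 'common'
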
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
1811 def csmap(x):
1812 def csmap(x):
1812 self.ui.debug("add changeset %s\n" % short(x))
1813 self.ui.debug("add changeset %s\n" % short(x))
1813 return len(cl)
1814 return len(cl)
1814
1815
1815 def revmap(x):
1816 def revmap(x):
1816 return cl.rev(x)
1817 return cl.rev(x)
1817
1818
1818 if not source:
1819 if not source:
1819 return 0
1820 return 0
1820
1821
1821 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1822 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1822
1823
1823 changesets = files = revisions = 0
1824 changesets = files = revisions = 0
1824 efiles = set()
1825 efiles = set()
1825
1826
1826 # write changelog data to temp files so concurrent readers will not see
1827 # write changelog data to temp files so concurrent readers will not see
1827 # inconsistent view
1828 # inconsistent view
1828 cl = self.changelog
1829 cl = self.changelog
1829 cl.delayupdate()
1830 cl.delayupdate()
1830 oldheads = cl.heads()
1831 oldheads = cl.heads()
1831
1832
1832 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1833 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1833 try:
1834 try:
1834 trp = weakref.proxy(tr)
1835 trp = weakref.proxy(tr)
1835 # pull off the changeset group
1836 # pull off the changeset group
1836 self.ui.status(_("adding changesets\n"))
1837 self.ui.status(_("adding changesets\n"))
1837 clstart = len(cl)
1838 clstart = len(cl)
1838 class prog(object):
1839 class prog(object):
1839 step = _('changesets')
1840 step = _('changesets')
1840 count = 1
1841 count = 1
1841 ui = self.ui
1842 ui = self.ui
1842 total = None
1843 total = None
1843 def __call__(self):
1844 def __call__(self):
1844 self.ui.progress(self.step, self.count, unit=_('chunks'),
1845 self.ui.progress(self.step, self.count, unit=_('chunks'),
1845 total=self.total)
1846 total=self.total)
1846 self.count += 1
1847 self.count += 1
1847 pr = prog()
1848 pr = prog()
1848 source.callback = pr
1849 source.callback = pr
1849
1850
1850 source.changelogheader()
1851 source.changelogheader()
1851 if (cl.addgroup(source, csmap, trp) is None
1852 if (cl.addgroup(source, csmap, trp) is None
1852 and not emptyok):
1853 and not emptyok):
1853 raise util.Abort(_("received changelog group is empty"))
1854 raise util.Abort(_("received changelog group is empty"))
1854 clend = len(cl)
1855 clend = len(cl)
1855 changesets = clend - clstart
1856 changesets = clend - clstart
1856 for c in xrange(clstart, clend):
1857 for c in xrange(clstart, clend):
1857 efiles.update(self[c].files())
1858 efiles.update(self[c].files())
1858 efiles = len(efiles)
1859 efiles = len(efiles)
1859 self.ui.progress(_('changesets'), None)
1860 self.ui.progress(_('changesets'), None)
1860
1861
1861 # pull off the manifest group
1862 # pull off the manifest group
1862 self.ui.status(_("adding manifests\n"))
1863 self.ui.status(_("adding manifests\n"))
1863 pr.step = _('manifests')
1864 pr.step = _('manifests')
1864 pr.count = 1
1865 pr.count = 1
1865 pr.total = changesets # manifests <= changesets
1866 pr.total = changesets # manifests <= changesets
1866 # no need to check for empty manifest group here:
1867 # no need to check for empty manifest group here:
1867 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1868 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1868 # no new manifest will be created and the manifest group will
1869 # no new manifest will be created and the manifest group will
1869 # be empty during the pull
1870 # be empty during the pull
1870 source.manifestheader()
1871 source.manifestheader()
1871 self.manifest.addgroup(source, revmap, trp)
1872 self.manifest.addgroup(source, revmap, trp)
1872 self.ui.progress(_('manifests'), None)
1873 self.ui.progress(_('manifests'), None)
1873
1874
1874 needfiles = {}
1875 needfiles = {}
1875 if self.ui.configbool('server', 'validate', default=False):
1876 if self.ui.configbool('server', 'validate', default=False):
1876 # validate incoming csets have their manifests
1877 # validate incoming csets have their manifests
1877 for cset in xrange(clstart, clend):
1878 for cset in xrange(clstart, clend):
1878 mfest = self.changelog.read(self.changelog.node(cset))[0]
1879 mfest = self.changelog.read(self.changelog.node(cset))[0]
1879 mfest = self.manifest.readdelta(mfest)
1880 mfest = self.manifest.readdelta(mfest)
1880 # store file nodes we must see
1881 # store file nodes we must see
1881 for f, n in mfest.iteritems():
1882 for f, n in mfest.iteritems():
1882 needfiles.setdefault(f, set()).add(n)
1883 needfiles.setdefault(f, set()).add(n)
1883
1884
1884 # process the files
1885 # process the files
1885 self.ui.status(_("adding file changes\n"))
1886 self.ui.status(_("adding file changes\n"))
1886 pr.step = _('files')
1887 pr.step = _('files')
1887 pr.count = 1
1888 pr.count = 1
1888 pr.total = efiles
1889 pr.total = efiles
1889 source.callback = None
1890 source.callback = None
1890
1891
1891 while True:
1892 while True:
1892 chunkdata = source.filelogheader()
1893 chunkdata = source.filelogheader()
1893 if not chunkdata:
1894 if not chunkdata:
1894 break
1895 break
1895 f = chunkdata["filename"]
1896 f = chunkdata["filename"]
1896 self.ui.debug("adding %s revisions\n" % f)
1897 self.ui.debug("adding %s revisions\n" % f)
1897 pr()
1898 pr()
1898 fl = self.file(f)
1899 fl = self.file(f)
1899 o = len(fl)
1900 o = len(fl)
1900 if fl.addgroup(source, revmap, trp) is None:
1901 if fl.addgroup(source, revmap, trp) is None:
1901 raise util.Abort(_("received file revlog group is empty"))
1902 raise util.Abort(_("received file revlog group is empty"))
1902 revisions += len(fl) - o
1903 revisions += len(fl) - o
1903 files += 1
1904 files += 1
1904 if f in needfiles:
1905 if f in needfiles:
1905 needs = needfiles[f]
1906 needs = needfiles[f]
1906 for new in xrange(o, len(fl)):
1907 for new in xrange(o, len(fl)):
1907 n = fl.node(new)
1908 n = fl.node(new)
1908 if n in needs:
1909 if n in needs:
1909 needs.remove(n)
1910 needs.remove(n)
1910 if not needs:
1911 if not needs:
1911 del needfiles[f]
1912 del needfiles[f]
1912 self.ui.progress(_('files'), None)
1913 self.ui.progress(_('files'), None)
1913
1914
1914 for f, needs in needfiles.iteritems():
1915 for f, needs in needfiles.iteritems():
1915 fl = self.file(f)
1916 fl = self.file(f)
1916 for n in needs:
1917 for n in needs:
1917 try:
1918 try:
1918 fl.rev(n)
1919 fl.rev(n)
1919 except error.LookupError:
1920 except error.LookupError:
1920 raise util.Abort(
1921 raise util.Abort(
1921 _('missing file data for %s:%s - run hg verify') %
1922 _('missing file data for %s:%s - run hg verify') %
1922 (f, hex(n)))
1923 (f, hex(n)))
1923
1924
1924 dh = 0
1925 dh = 0
1925 if oldheads:
1926 if oldheads:
1926 heads = cl.heads()
1927 heads = cl.heads()
1927 dh = len(heads) - len(oldheads)
1928 dh = len(heads) - len(oldheads)
1928 for h in heads:
1929 for h in heads:
1929 if h not in oldheads and 'close' in self[h].extra():
1930 if h not in oldheads and 'close' in self[h].extra():
1930 dh -= 1
1931 dh -= 1
1931 htext = ""
1932 htext = ""
1932 if dh:
1933 if dh:
1933 htext = _(" (%+d heads)") % dh
1934 htext = _(" (%+d heads)") % dh
1934
1935
1935 self.ui.status(_("added %d changesets"
1936 self.ui.status(_("added %d changesets"
1936 " with %d changes to %d files%s\n")
1937 " with %d changes to %d files%s\n")
1937 % (changesets, revisions, files, htext))
1938 % (changesets, revisions, files, htext))
1938
1939
1939 if changesets > 0:
1940 if changesets > 0:
1940 p = lambda: cl.writepending() and self.root or ""
1941 p = lambda: cl.writepending() and self.root or ""
1941 self.hook('pretxnchangegroup', throw=True,
1942 self.hook('pretxnchangegroup', throw=True,
1942 node=hex(cl.node(clstart)), source=srctype,
1943 node=hex(cl.node(clstart)), source=srctype,
1943 url=url, pending=p)
1944 url=url, pending=p)
1944
1945
1945 # make changelog see real files again
1946 # make changelog see real files again
1946 cl.finalize(trp)
1947 cl.finalize(trp)
1947
1948
1948 tr.close()
1949 tr.close()
1949 finally:
1950 finally:
1950 tr.release()
1951 tr.release()
1951 if lock:
1952 if lock:
1952 lock.release()
1953 lock.release()
1953
1954
1954 if changesets > 0:
1955 if changesets > 0:
1955 # forcefully update the on-disk branch cache
1956 # forcefully update the on-disk branch cache
1956 self.ui.debug("updating the branch cache\n")
1957 self.ui.debug("updating the branch cache\n")
1957 self.updatebranchcache()
1958 self.updatebranchcache()
1958 self.hook("changegroup", node=hex(cl.node(clstart)),
1959 self.hook("changegroup", node=hex(cl.node(clstart)),
1959 source=srctype, url=url)
1960 source=srctype, url=url)
1960
1961
1961 for i in xrange(clstart, clend):
1962 for i in xrange(clstart, clend):
1962 self.hook("incoming", node=hex(cl.node(i)),
1963 self.hook("incoming", node=hex(cl.node(i)),
1963 source=srctype, url=url)
1964 source=srctype, url=url)
1964
1965
1965 # never return 0 here:
1966 # never return 0 here:
1966 if dh < 0:
1967 if dh < 0:
1967 return dh - 1
1968 return dh - 1
1968 else:
1969 else:
1969 return dh + 1
1970 return dh + 1
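
# --- illustrative sketch, not part of localrepo.py ---
# Decodes the integer convention documented in addchangegroup's docstring:
# positive values are 1 + heads added, negative values are -1 - heads
# removed, and 0 is reserved for "nothing changed". describechange is a
# hypothetical helper, not a Mercurial function.
def describechange(ret):
    if ret == 0:
        return 'no changes'
    if ret > 0:
        return '%d head(s) added' % (ret - 1)  # ret == 1: head count unchanged
    return '%d head(s) removed' % (-ret - 1)

assert describechange(1) == '0 head(s) added'
assert describechange(3) == '2 head(s) added'
assert describechange(-2) == '1 head(s) removed'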

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new
            # format-related requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
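
# --- illustrative sketch, not part of localrepo.py ---
# The wire format stream_in() consumes above, modeled on a plain file
# object: a status line ('0' on success), a '<files> <bytes>' summary
# line, then per-file '<name>\0<size>' headers each followed by exactly
# <size> bytes of raw store data. parsepreamble is a hypothetical name.
def parsepreamble(fp):
    resp = int(fp.readline())
    if resp != 0:
        raise ValueError('stream clone refused (code %d)' % resp)
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    return total_files, total_bytes

# e.g. wrapping '0\n3 4096\n...' in a StringIO yields (3, 4096)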

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
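
# --- illustrative sketch, not part of localrepo.py ---
# The negotiation clone() performs above, as a pure function: stream only
# for full clones, prefer the legacy 'stream' capability (revlogv1
# implied), otherwise stream only if every requirement in 'streamreqs'
# is a format we support. capable() stands in for remote.capable().
def streamformats(stream, heads, capable, supportedformats):
    if not stream or heads:
        return None                      # must fall back to pull
    if capable('stream'):
        return set(('revlogv1',))
    streamreqs = capable('streamreqs')
    if streamreqs:
        reqs = set(streamreqs.split(','))
        if not reqs - supportedformats:  # remote needs nothing we lack
            return reqs
    return None                          # incompatible: pull instead

caps = {'streamreqs': 'revlogv1,generaldelta'}.get
assert (streamformats(True, [], caps, set(['revlogv1', 'generaldelta']))
        == set(['revlogv1', 'generaldelta']))
assert streamformats(True, ['somehead'], caps, set(['revlogv1'])) is None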

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
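
# --- illustrative sketch, not part of localrepo.py ---
# An in-memory model (an assumption, not Mercurial's implementation) of
# the compare-and-swap contract behind pushkey: an update is applied only
# if the caller's 'old' value still matches, which is how e.g. bookmark
# pushes avoid clobbering a concurrent move.
def caspush(store, key, old, new):
    if store.get(key, '') != old:
        return False     # stale 'old': reject the update
    store[key] = new
    return True

bm = {}
assert caspush(bm, 'feature-x', '', 'abc1')        # create succeeds
assert not caspush(bm, 'feature-x', '', 'def2')    # stale old value
assert caspush(bm, 'feature-x', 'abc1', 'def2')    # correct old value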

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])
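
# --- illustrative sketch, not part of localrepo.py ---
# The tail expression above strips the repository root plus one path
# separator so the returned name is root-relative; a standalone
# equivalent (names assumed):
import os

def rootrelative(root, absname):
    return absname[len(root) + 1:]

assert (rootrelative('/repo', os.path.join('/repo', '.hg', 'last-message.txt'))
        == os.path.join('.hg', 'last-message.txt'))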

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
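
# --- illustrative sketch, not part of localrepo.py ---
# Why aftertrans() returns a plain closure: the transaction holds only
# the function, which captures a list of name pairs rather than the
# repository, so no reference cycle delays repo destructors. Standalone
# demonstration with an injected rename (an assumption for the sketch):
def aftertrans_sketch(files, rename):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            rename(src, dest)
    return a

log = []
a = aftertrans_sketch([('journal', 'undo')], lambda s, d: log.append((s, d)))
a()
assert log == [('journal', 'undo')]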

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True