phases: basic I/O logic...
Pierre-Yves David
r15418:cf729af2 default
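
The hunk below makes only two changes to `localrepository`: it imports the new `phases` module and adds a `phaseroots` filecache property that delegates parsing to `phases.readroots(self)`. The `phases` module itself is not part of this excerpt. For orientation, here is a minimal sketch of what the read/write counterparts could look like, assuming a line-oriented `.hg/store/phaseroots` file of `<phase> <hex-node>` pairs; the exact format, the `allphases` constant, and `writeroots` are assumptions inferred from the `readroots` call, not taken from this diff:

# Hypothetical sketch of the phases module counterpart assumed by the hunk
# below; the real phases.py added by this series may differ in detail.
from node import bin, hex

allphases = range(2)  # assumed: 0 = public, 1 = draft

def readroots(repo):
    """read phase roots from .hg/store/phaseroots (sketch)"""
    roots = [set() for i in allphases]
    try:
        f = repo.sopener('phaseroots')
        try:
            # each line: "<phase index> <hex node>"
            for line in f:
                phase, nh = line.strip().split()
                roots[int(phase)].add(bin(nh))
        finally:
            f.close()
    except IOError:
        pass  # no phaseroots file yet: all root sets stay empty
    return roots

def writeroots(repo):
    """write phase roots back to disk (sketch)"""
    f = repo.sopener('phaseroots', 'w', atomictemp=True)
    try:
        for phase, roots in enumerate(repo._phaseroots):
            for h in roots:
                f.write('%i %s\n' % (phase, hex(h)))
    finally:
        f.close()

On this reading, a repository with no phaseroots file simply falls back to empty root sets, which keeps existing repositories working unchanged, and the `@filecache('phaseroots')` decorator below ensures the parsed roots are invalidated whenever the on-disk file changes.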
@@ -1,2104 +1,2108 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 'known', 'getbundle'))
24 'known', 'getbundle'))
25 supportedformats = set(('revlogv1', 'generaldelta'))
25 supportedformats = set(('revlogv1', 'generaldelta'))
26 supported = supportedformats | set(('store', 'fncache', 'shared',
26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 'dotencode'))
27 'dotencode'))
28
28
29 def __init__(self, baseui, path=None, create=False):
29 def __init__(self, baseui, path=None, create=False):
30 repo.repository.__init__(self)
30 repo.repository.__init__(self)
31 self.root = os.path.realpath(util.expandpath(path))
31 self.root = os.path.realpath(util.expandpath(path))
32 self.path = os.path.join(self.root, ".hg")
32 self.path = os.path.join(self.root, ".hg")
33 self.origroot = path
33 self.origroot = path
34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 self.opener = scmutil.opener(self.path)
35 self.opener = scmutil.opener(self.path)
36 self.wopener = scmutil.opener(self.root)
36 self.wopener = scmutil.opener(self.root)
37 self.baseui = baseui
37 self.baseui = baseui
38 self.ui = baseui.copy()
38 self.ui = baseui.copy()
39
39
40 try:
40 try:
41 self.ui.readconfig(self.join("hgrc"), self.root)
41 self.ui.readconfig(self.join("hgrc"), self.root)
42 extensions.loadall(self.ui)
42 extensions.loadall(self.ui)
43 except IOError:
43 except IOError:
44 pass
44 pass
45
45
46 if not os.path.isdir(self.path):
46 if not os.path.isdir(self.path):
47 if create:
47 if create:
48 if not os.path.exists(path):
48 if not os.path.exists(path):
49 util.makedirs(path)
49 util.makedirs(path)
50 util.makedir(self.path, notindexed=True)
50 util.makedir(self.path, notindexed=True)
51 requirements = ["revlogv1"]
51 requirements = ["revlogv1"]
52 if self.ui.configbool('format', 'usestore', True):
52 if self.ui.configbool('format', 'usestore', True):
53 os.mkdir(os.path.join(self.path, "store"))
53 os.mkdir(os.path.join(self.path, "store"))
54 requirements.append("store")
54 requirements.append("store")
55 if self.ui.configbool('format', 'usefncache', True):
55 if self.ui.configbool('format', 'usefncache', True):
56 requirements.append("fncache")
56 requirements.append("fncache")
57 if self.ui.configbool('format', 'dotencode', True):
57 if self.ui.configbool('format', 'dotencode', True):
58 requirements.append('dotencode')
58 requirements.append('dotencode')
59 # create an invalid changelog
59 # create an invalid changelog
60 self.opener.append(
60 self.opener.append(
61 "00changelog.i",
61 "00changelog.i",
62 '\0\0\0\2' # represents revlogv2
62 '\0\0\0\2' # represents revlogv2
63 ' dummy changelog to prevent using the old repo layout'
63 ' dummy changelog to prevent using the old repo layout'
64 )
64 )
65 if self.ui.configbool('format', 'generaldelta', False):
65 if self.ui.configbool('format', 'generaldelta', False):
66 requirements.append("generaldelta")
66 requirements.append("generaldelta")
67 requirements = set(requirements)
67 requirements = set(requirements)
68 else:
68 else:
69 raise error.RepoError(_("repository %s not found") % path)
69 raise error.RepoError(_("repository %s not found") % path)
70 elif create:
70 elif create:
71 raise error.RepoError(_("repository %s already exists") % path)
71 raise error.RepoError(_("repository %s already exists") % path)
72 else:
72 else:
73 try:
73 try:
74 requirements = scmutil.readrequires(self.opener, self.supported)
74 requirements = scmutil.readrequires(self.opener, self.supported)
75 except IOError, inst:
75 except IOError, inst:
76 if inst.errno != errno.ENOENT:
76 if inst.errno != errno.ENOENT:
77 raise
77 raise
78 requirements = set()
78 requirements = set()
79
79
80 self.sharedpath = self.path
80 self.sharedpath = self.path
81 try:
81 try:
82 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
82 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
83 if not os.path.exists(s):
83 if not os.path.exists(s):
84 raise error.RepoError(
84 raise error.RepoError(
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 self.sharedpath = s
86 self.sharedpath = s
87 except IOError, inst:
87 except IOError, inst:
88 if inst.errno != errno.ENOENT:
88 if inst.errno != errno.ENOENT:
89 raise
89 raise
90
90
91 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
91 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
92 self.spath = self.store.path
92 self.spath = self.store.path
93 self.sopener = self.store.opener
93 self.sopener = self.store.opener
94 self.sjoin = self.store.join
94 self.sjoin = self.store.join
95 self.opener.createmode = self.store.createmode
95 self.opener.createmode = self.store.createmode
96 self._applyrequirements(requirements)
96 self._applyrequirements(requirements)
97 if create:
97 if create:
98 self._writerequirements()
98 self._writerequirements()
99
99
100
100
101 self._branchcache = None
101 self._branchcache = None
102 self._branchcachetip = None
102 self._branchcachetip = None
103 self.filterpats = {}
103 self.filterpats = {}
104 self._datafilters = {}
104 self._datafilters = {}
105 self._transref = self._lockref = self._wlockref = None
105 self._transref = self._lockref = self._wlockref = None
106
106
107 # A cache for various files under .hg/ that tracks file changes,
107 # A cache for various files under .hg/ that tracks file changes,
108 # (used by the filecache decorator)
108 # (used by the filecache decorator)
109 #
109 #
110 # Maps a property name to its util.filecacheentry
110 # Maps a property name to its util.filecacheentry
111 self._filecache = {}
111 self._filecache = {}
112
112
113 def _applyrequirements(self, requirements):
113 def _applyrequirements(self, requirements):
114 self.requirements = requirements
114 self.requirements = requirements
115 openerreqs = set(('revlogv1', 'generaldelta'))
115 openerreqs = set(('revlogv1', 'generaldelta'))
116 self.sopener.options = dict((r, 1) for r in requirements
116 self.sopener.options = dict((r, 1) for r in requirements
117 if r in openerreqs)
117 if r in openerreqs)
118
118
119 def _writerequirements(self):
119 def _writerequirements(self):
120 reqfile = self.opener("requires", "w")
120 reqfile = self.opener("requires", "w")
121 for r in self.requirements:
121 for r in self.requirements:
122 reqfile.write("%s\n" % r)
122 reqfile.write("%s\n" % r)
123 reqfile.close()
123 reqfile.close()
124
124
125 def _checknested(self, path):
125 def _checknested(self, path):
126 """Determine if path is a legal nested repository."""
126 """Determine if path is a legal nested repository."""
127 if not path.startswith(self.root):
127 if not path.startswith(self.root):
128 return False
128 return False
129 subpath = path[len(self.root) + 1:]
129 subpath = path[len(self.root) + 1:]
130
130
131 # XXX: Checking against the current working copy is wrong in
131 # XXX: Checking against the current working copy is wrong in
132 # the sense that it can reject things like
132 # the sense that it can reject things like
133 #
133 #
134 # $ hg cat -r 10 sub/x.txt
134 # $ hg cat -r 10 sub/x.txt
135 #
135 #
136 # if sub/ is no longer a subrepository in the working copy
136 # if sub/ is no longer a subrepository in the working copy
137 # parent revision.
137 # parent revision.
138 #
138 #
139 # However, it can of course also allow things that would have
139 # However, it can of course also allow things that would have
140 # been rejected before, such as the above cat command if sub/
140 # been rejected before, such as the above cat command if sub/
141 # is a subrepository now, but was a normal directory before.
141 # is a subrepository now, but was a normal directory before.
142 # The old path auditor would have rejected by mistake since it
142 # The old path auditor would have rejected by mistake since it
143 # panics when it sees sub/.hg/.
143 # panics when it sees sub/.hg/.
144 #
144 #
145 # All in all, checking against the working copy seems sensible
145 # All in all, checking against the working copy seems sensible
146 # since we want to prevent access to nested repositories on
146 # since we want to prevent access to nested repositories on
147 # the filesystem *now*.
147 # the filesystem *now*.
148 ctx = self[None]
148 ctx = self[None]
149 parts = util.splitpath(subpath)
149 parts = util.splitpath(subpath)
150 while parts:
150 while parts:
151 prefix = os.sep.join(parts)
151 prefix = os.sep.join(parts)
152 if prefix in ctx.substate:
152 if prefix in ctx.substate:
153 if prefix == subpath:
153 if prefix == subpath:
154 return True
154 return True
155 else:
155 else:
156 sub = ctx.sub(prefix)
156 sub = ctx.sub(prefix)
157 return sub.checknested(subpath[len(prefix) + 1:])
157 return sub.checknested(subpath[len(prefix) + 1:])
158 else:
158 else:
159 parts.pop()
159 parts.pop()
160 return False
160 return False
161
161
162 @filecache('bookmarks')
162 @filecache('bookmarks')
163 def _bookmarks(self):
163 def _bookmarks(self):
164 return bookmarks.read(self)
164 return bookmarks.read(self)
165
165
166 @filecache('bookmarks.current')
166 @filecache('bookmarks.current')
167 def _bookmarkcurrent(self):
167 def _bookmarkcurrent(self):
168 return bookmarks.readcurrent(self)
168 return bookmarks.readcurrent(self)
169
169
170 def _writebookmarks(self, marks):
170 def _writebookmarks(self, marks):
171 bookmarks.write(self)
171 bookmarks.write(self)
172
172
173 @filecache('phaseroots')
174 def _phaseroots(self):
175 return phases.readroots(self)
176
173 @filecache('00changelog.i', True)
177 @filecache('00changelog.i', True)
174 def changelog(self):
178 def changelog(self):
175 c = changelog.changelog(self.sopener)
179 c = changelog.changelog(self.sopener)
176 if 'HG_PENDING' in os.environ:
180 if 'HG_PENDING' in os.environ:
177 p = os.environ['HG_PENDING']
181 p = os.environ['HG_PENDING']
178 if p.startswith(self.root):
182 if p.startswith(self.root):
179 c.readpending('00changelog.i.a')
183 c.readpending('00changelog.i.a')
180 return c
184 return c
181
185
182 @filecache('00manifest.i', True)
186 @filecache('00manifest.i', True)
183 def manifest(self):
187 def manifest(self):
184 return manifest.manifest(self.sopener)
188 return manifest.manifest(self.sopener)
185
189
186 @filecache('dirstate')
190 @filecache('dirstate')
187 def dirstate(self):
191 def dirstate(self):
188 warned = [0]
192 warned = [0]
189 def validate(node):
193 def validate(node):
190 try:
194 try:
191 self.changelog.rev(node)
195 self.changelog.rev(node)
192 return node
196 return node
193 except error.LookupError:
197 except error.LookupError:
194 if not warned[0]:
198 if not warned[0]:
195 warned[0] = True
199 warned[0] = True
196 self.ui.warn(_("warning: ignoring unknown"
200 self.ui.warn(_("warning: ignoring unknown"
197 " working parent %s!\n") % short(node))
201 " working parent %s!\n") % short(node))
198 return nullid
202 return nullid
199
203
200 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
204 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
201
205
202 def __getitem__(self, changeid):
206 def __getitem__(self, changeid):
203 if changeid is None:
207 if changeid is None:
204 return context.workingctx(self)
208 return context.workingctx(self)
205 return context.changectx(self, changeid)
209 return context.changectx(self, changeid)
206
210
207 def __contains__(self, changeid):
211 def __contains__(self, changeid):
208 try:
212 try:
209 return bool(self.lookup(changeid))
213 return bool(self.lookup(changeid))
210 except error.RepoLookupError:
214 except error.RepoLookupError:
211 return False
215 return False
212
216
213 def __nonzero__(self):
217 def __nonzero__(self):
214 return True
218 return True
215
219
216 def __len__(self):
220 def __len__(self):
217 return len(self.changelog)
221 return len(self.changelog)
218
222
219 def __iter__(self):
223 def __iter__(self):
220 for i in xrange(len(self)):
224 for i in xrange(len(self)):
221 yield i
225 yield i
222
226
223 def revs(self, expr, *args):
227 def revs(self, expr, *args):
224 '''Return a list of revisions matching the given revset'''
228 '''Return a list of revisions matching the given revset'''
225 expr = revset.formatspec(expr, *args)
229 expr = revset.formatspec(expr, *args)
226 m = revset.match(None, expr)
230 m = revset.match(None, expr)
227 return [r for r in m(self, range(len(self)))]
231 return [r for r in m(self, range(len(self)))]
228
232
229 def set(self, expr, *args):
233 def set(self, expr, *args):
230 '''
234 '''
231 Yield a context for each matching revision, after doing arg
235 Yield a context for each matching revision, after doing arg
232 replacement via revset.formatspec
236 replacement via revset.formatspec
233 '''
237 '''
234 for r in self.revs(expr, *args):
238 for r in self.revs(expr, *args):
235 yield self[r]
239 yield self[r]
236
240
237 def url(self):
241 def url(self):
238 return 'file:' + self.root
242 return 'file:' + self.root
239
243
240 def hook(self, name, throw=False, **args):
244 def hook(self, name, throw=False, **args):
241 return hook.hook(self.ui, self, name, throw, **args)
245 return hook.hook(self.ui, self, name, throw, **args)
242
246
243 tag_disallowed = ':\r\n'
247 tag_disallowed = ':\r\n'
244
248
245 def _tag(self, names, node, message, local, user, date, extra={}):
249 def _tag(self, names, node, message, local, user, date, extra={}):
246 if isinstance(names, str):
250 if isinstance(names, str):
247 allchars = names
251 allchars = names
248 names = (names,)
252 names = (names,)
249 else:
253 else:
250 allchars = ''.join(names)
254 allchars = ''.join(names)
251 for c in self.tag_disallowed:
255 for c in self.tag_disallowed:
252 if c in allchars:
256 if c in allchars:
253 raise util.Abort(_('%r cannot be used in a tag name') % c)
257 raise util.Abort(_('%r cannot be used in a tag name') % c)
254
258
255 branches = self.branchmap()
259 branches = self.branchmap()
256 for name in names:
260 for name in names:
257 self.hook('pretag', throw=True, node=hex(node), tag=name,
261 self.hook('pretag', throw=True, node=hex(node), tag=name,
258 local=local)
262 local=local)
259 if name in branches:
263 if name in branches:
260 self.ui.warn(_("warning: tag %s conflicts with existing"
264 self.ui.warn(_("warning: tag %s conflicts with existing"
261 " branch name\n") % name)
265 " branch name\n") % name)
262
266
263 def writetags(fp, names, munge, prevtags):
267 def writetags(fp, names, munge, prevtags):
264 fp.seek(0, 2)
268 fp.seek(0, 2)
265 if prevtags and prevtags[-1] != '\n':
269 if prevtags and prevtags[-1] != '\n':
266 fp.write('\n')
270 fp.write('\n')
267 for name in names:
271 for name in names:
268 m = munge and munge(name) or name
272 m = munge and munge(name) or name
269 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
273 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
270 old = self.tags().get(name, nullid)
274 old = self.tags().get(name, nullid)
271 fp.write('%s %s\n' % (hex(old), m))
275 fp.write('%s %s\n' % (hex(old), m))
272 fp.write('%s %s\n' % (hex(node), m))
276 fp.write('%s %s\n' % (hex(node), m))
273 fp.close()
277 fp.close()
274
278
275 prevtags = ''
279 prevtags = ''
276 if local:
280 if local:
277 try:
281 try:
278 fp = self.opener('localtags', 'r+')
282 fp = self.opener('localtags', 'r+')
279 except IOError:
283 except IOError:
280 fp = self.opener('localtags', 'a')
284 fp = self.opener('localtags', 'a')
281 else:
285 else:
282 prevtags = fp.read()
286 prevtags = fp.read()
283
287
284 # local tags are stored in the current charset
288 # local tags are stored in the current charset
285 writetags(fp, names, None, prevtags)
289 writetags(fp, names, None, prevtags)
286 for name in names:
290 for name in names:
287 self.hook('tag', node=hex(node), tag=name, local=local)
291 self.hook('tag', node=hex(node), tag=name, local=local)
288 return
292 return
289
293
290 try:
294 try:
291 fp = self.wfile('.hgtags', 'rb+')
295 fp = self.wfile('.hgtags', 'rb+')
292 except IOError, e:
296 except IOError, e:
293 if e.errno != errno.ENOENT:
297 if e.errno != errno.ENOENT:
294 raise
298 raise
295 fp = self.wfile('.hgtags', 'ab')
299 fp = self.wfile('.hgtags', 'ab')
296 else:
300 else:
297 prevtags = fp.read()
301 prevtags = fp.read()
298
302
299 # committed tags are stored in UTF-8
303 # committed tags are stored in UTF-8
300 writetags(fp, names, encoding.fromlocal, prevtags)
304 writetags(fp, names, encoding.fromlocal, prevtags)
301
305
302 fp.close()
306 fp.close()
303
307
304 if '.hgtags' not in self.dirstate:
308 if '.hgtags' not in self.dirstate:
305 self[None].add(['.hgtags'])
309 self[None].add(['.hgtags'])
306
310
307 m = matchmod.exact(self.root, '', ['.hgtags'])
311 m = matchmod.exact(self.root, '', ['.hgtags'])
308 tagnode = self.commit(message, user, date, extra=extra, match=m)
312 tagnode = self.commit(message, user, date, extra=extra, match=m)
309
313
310 for name in names:
314 for name in names:
311 self.hook('tag', node=hex(node), tag=name, local=local)
315 self.hook('tag', node=hex(node), tag=name, local=local)
312
316
313 return tagnode
317 return tagnode
314
318
315 def tag(self, names, node, message, local, user, date):
319 def tag(self, names, node, message, local, user, date):
316 '''tag a revision with one or more symbolic names.
320 '''tag a revision with one or more symbolic names.
317
321
318 names is a list of strings or, when adding a single tag, names may be a
322 names is a list of strings or, when adding a single tag, names may be a
319 string.
323 string.
320
324
321 if local is True, the tags are stored in a per-repository file.
325 if local is True, the tags are stored in a per-repository file.
322 otherwise, they are stored in the .hgtags file, and a new
326 otherwise, they are stored in the .hgtags file, and a new
323 changeset is committed with the change.
327 changeset is committed with the change.
324
328
325 keyword arguments:
329 keyword arguments:
326
330
327 local: whether to store tags in non-version-controlled file
331 local: whether to store tags in non-version-controlled file
328 (default False)
332 (default False)
329
333
330 message: commit message to use if committing
334 message: commit message to use if committing
331
335
332 user: name of user to use if committing
336 user: name of user to use if committing
333
337
334 date: date tuple to use if committing'''
338 date: date tuple to use if committing'''
335
339
336 if not local:
340 if not local:
337 for x in self.status()[:5]:
341 for x in self.status()[:5]:
338 if '.hgtags' in x:
342 if '.hgtags' in x:
339 raise util.Abort(_('working copy of .hgtags is changed '
343 raise util.Abort(_('working copy of .hgtags is changed '
340 '(please commit .hgtags manually)'))
344 '(please commit .hgtags manually)'))
341
345
342 self.tags() # instantiate the cache
346 self.tags() # instantiate the cache
343 self._tag(names, node, message, local, user, date)
347 self._tag(names, node, message, local, user, date)
344
348
345 @propertycache
349 @propertycache
346 def _tagscache(self):
350 def _tagscache(self):
347 '''Returns a tagscache object that contains various tags related caches.'''
351 '''Returns a tagscache object that contains various tags related caches.'''
348
352
349 # This simplifies its cache management by having one decorated
353 # This simplifies its cache management by having one decorated
350 # function (this one) and the rest simply fetch things from it.
354 # function (this one) and the rest simply fetch things from it.
351 class tagscache(object):
355 class tagscache(object):
352 def __init__(self):
356 def __init__(self):
353 # These two define the set of tags for this repository. tags
357 # These two define the set of tags for this repository. tags
354 # maps tag name to node; tagtypes maps tag name to 'global' or
358 # maps tag name to node; tagtypes maps tag name to 'global' or
355 # 'local'. (Global tags are defined by .hgtags across all
359 # 'local'. (Global tags are defined by .hgtags across all
356 # heads, and local tags are defined in .hg/localtags.)
360 # heads, and local tags are defined in .hg/localtags.)
357 # They constitute the in-memory cache of tags.
361 # They constitute the in-memory cache of tags.
358 self.tags = self.tagtypes = None
362 self.tags = self.tagtypes = None
359
363
360 self.nodetagscache = self.tagslist = None
364 self.nodetagscache = self.tagslist = None
361
365
362 cache = tagscache()
366 cache = tagscache()
363 cache.tags, cache.tagtypes = self._findtags()
367 cache.tags, cache.tagtypes = self._findtags()
364
368
365 return cache
369 return cache
366
370
367 def tags(self):
371 def tags(self):
368 '''return a mapping of tag to node'''
372 '''return a mapping of tag to node'''
369 return self._tagscache.tags
373 return self._tagscache.tags
370
374
371 def _findtags(self):
375 def _findtags(self):
372 '''Do the hard work of finding tags. Return a pair of dicts
376 '''Do the hard work of finding tags. Return a pair of dicts
373 (tags, tagtypes) where tags maps tag name to node, and tagtypes
377 (tags, tagtypes) where tags maps tag name to node, and tagtypes
374 maps tag name to a string like \'global\' or \'local\'.
378 maps tag name to a string like \'global\' or \'local\'.
375 Subclasses or extensions are free to add their own tags, but
379 Subclasses or extensions are free to add their own tags, but
376 should be aware that the returned dicts will be retained for the
380 should be aware that the returned dicts will be retained for the
377 duration of the localrepo object.'''
381 duration of the localrepo object.'''
378
382
379 # XXX what tagtype should subclasses/extensions use? Currently
383 # XXX what tagtype should subclasses/extensions use? Currently
380 # mq and bookmarks add tags, but do not set the tagtype at all.
384 # mq and bookmarks add tags, but do not set the tagtype at all.
381 # Should each extension invent its own tag type? Should there
385 # Should each extension invent its own tag type? Should there
382 # be one tagtype for all such "virtual" tags? Or is the status
386 # be one tagtype for all such "virtual" tags? Or is the status
383 # quo fine?
387 # quo fine?
384
388
385 alltags = {} # map tag name to (node, hist)
389 alltags = {} # map tag name to (node, hist)
386 tagtypes = {}
390 tagtypes = {}
387
391
388 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
392 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
389 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
393 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
390
394
391 # Build the return dicts. Have to re-encode tag names because
395 # Build the return dicts. Have to re-encode tag names because
392 # the tags module always uses UTF-8 (in order not to lose info
396 # the tags module always uses UTF-8 (in order not to lose info
393 # writing to the cache), but the rest of Mercurial wants them in
397 # writing to the cache), but the rest of Mercurial wants them in
394 # local encoding.
398 # local encoding.
395 tags = {}
399 tags = {}
396 for (name, (node, hist)) in alltags.iteritems():
400 for (name, (node, hist)) in alltags.iteritems():
397 if node != nullid:
401 if node != nullid:
398 try:
402 try:
399 # ignore tags to unknown nodes
403 # ignore tags to unknown nodes
400 self.changelog.lookup(node)
404 self.changelog.lookup(node)
401 tags[encoding.tolocal(name)] = node
405 tags[encoding.tolocal(name)] = node
402 except error.LookupError:
406 except error.LookupError:
403 pass
407 pass
404 tags['tip'] = self.changelog.tip()
408 tags['tip'] = self.changelog.tip()
405 tagtypes = dict([(encoding.tolocal(name), value)
409 tagtypes = dict([(encoding.tolocal(name), value)
406 for (name, value) in tagtypes.iteritems()])
410 for (name, value) in tagtypes.iteritems()])
407 return (tags, tagtypes)
411 return (tags, tagtypes)
408
412
409 def tagtype(self, tagname):
413 def tagtype(self, tagname):
410 '''
414 '''
411 return the type of the given tag. result can be:
415 return the type of the given tag. result can be:
412
416
413 'local' : a local tag
417 'local' : a local tag
414 'global' : a global tag
418 'global' : a global tag
415 None : tag does not exist
419 None : tag does not exist
416 '''
420 '''
417
421
418 return self._tagscache.tagtypes.get(tagname)
422 return self._tagscache.tagtypes.get(tagname)
419
423
420 def tagslist(self):
424 def tagslist(self):
421 '''return a list of tags ordered by revision'''
425 '''return a list of tags ordered by revision'''
422 if not self._tagscache.tagslist:
426 if not self._tagscache.tagslist:
423 l = []
427 l = []
424 for t, n in self.tags().iteritems():
428 for t, n in self.tags().iteritems():
425 r = self.changelog.rev(n)
429 r = self.changelog.rev(n)
426 l.append((r, t, n))
430 l.append((r, t, n))
427 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
431 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
428
432
429 return self._tagscache.tagslist
433 return self._tagscache.tagslist
430
434
431 def nodetags(self, node):
435 def nodetags(self, node):
432 '''return the tags associated with a node'''
436 '''return the tags associated with a node'''
433 if not self._tagscache.nodetagscache:
437 if not self._tagscache.nodetagscache:
434 nodetagscache = {}
438 nodetagscache = {}
435 for t, n in self.tags().iteritems():
439 for t, n in self.tags().iteritems():
436 nodetagscache.setdefault(n, []).append(t)
440 nodetagscache.setdefault(n, []).append(t)
437 for tags in nodetagscache.itervalues():
441 for tags in nodetagscache.itervalues():
438 tags.sort()
442 tags.sort()
439 self._tagscache.nodetagscache = nodetagscache
443 self._tagscache.nodetagscache = nodetagscache
440 return self._tagscache.nodetagscache.get(node, [])
444 return self._tagscache.nodetagscache.get(node, [])
441
445
442 def nodebookmarks(self, node):
446 def nodebookmarks(self, node):
443 marks = []
447 marks = []
444 for bookmark, n in self._bookmarks.iteritems():
448 for bookmark, n in self._bookmarks.iteritems():
445 if n == node:
449 if n == node:
446 marks.append(bookmark)
450 marks.append(bookmark)
447 return sorted(marks)
451 return sorted(marks)
448
452
449 def _branchtags(self, partial, lrev):
453 def _branchtags(self, partial, lrev):
450 # TODO: rename this function?
454 # TODO: rename this function?
451 tiprev = len(self) - 1
455 tiprev = len(self) - 1
452 if lrev != tiprev:
456 if lrev != tiprev:
453 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
457 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
454 self._updatebranchcache(partial, ctxgen)
458 self._updatebranchcache(partial, ctxgen)
455 self._writebranchcache(partial, self.changelog.tip(), tiprev)
459 self._writebranchcache(partial, self.changelog.tip(), tiprev)
456
460
457 return partial
461 return partial
458
462
459 def updatebranchcache(self):
463 def updatebranchcache(self):
460 tip = self.changelog.tip()
464 tip = self.changelog.tip()
461 if self._branchcache is not None and self._branchcachetip == tip:
465 if self._branchcache is not None and self._branchcachetip == tip:
462 return self._branchcache
466 return self._branchcache
463
467
464 oldtip = self._branchcachetip
468 oldtip = self._branchcachetip
465 self._branchcachetip = tip
469 self._branchcachetip = tip
466 if oldtip is None or oldtip not in self.changelog.nodemap:
470 if oldtip is None or oldtip not in self.changelog.nodemap:
467 partial, last, lrev = self._readbranchcache()
471 partial, last, lrev = self._readbranchcache()
468 else:
472 else:
469 lrev = self.changelog.rev(oldtip)
473 lrev = self.changelog.rev(oldtip)
470 partial = self._branchcache
474 partial = self._branchcache
471
475
472 self._branchtags(partial, lrev)
476 self._branchtags(partial, lrev)
473 # this private cache holds all heads (not just tips)
477 # this private cache holds all heads (not just tips)
474 self._branchcache = partial
478 self._branchcache = partial
475
479
476 def branchmap(self):
480 def branchmap(self):
477 '''returns a dictionary {branch: [branchheads]}'''
481 '''returns a dictionary {branch: [branchheads]}'''
478 self.updatebranchcache()
482 self.updatebranchcache()
479 return self._branchcache
483 return self._branchcache
480
484
481 def branchtags(self):
485 def branchtags(self):
482 '''return a dict where branch names map to the tipmost head of
486 '''return a dict where branch names map to the tipmost head of
483 the branch, open heads come before closed'''
487 the branch, open heads come before closed'''
484 bt = {}
488 bt = {}
485 for bn, heads in self.branchmap().iteritems():
489 for bn, heads in self.branchmap().iteritems():
486 tip = heads[-1]
490 tip = heads[-1]
487 for h in reversed(heads):
491 for h in reversed(heads):
488 if 'close' not in self.changelog.read(h)[5]:
492 if 'close' not in self.changelog.read(h)[5]:
489 tip = h
493 tip = h
490 break
494 break
491 bt[bn] = tip
495 bt[bn] = tip
492 return bt
496 return bt
493
497
494 def _readbranchcache(self):
498 def _readbranchcache(self):
495 partial = {}
499 partial = {}
496 try:
500 try:
497 f = self.opener("cache/branchheads")
501 f = self.opener("cache/branchheads")
498 lines = f.read().split('\n')
502 lines = f.read().split('\n')
499 f.close()
503 f.close()
500 except (IOError, OSError):
504 except (IOError, OSError):
501 return {}, nullid, nullrev
505 return {}, nullid, nullrev
502
506
503 try:
507 try:
504 last, lrev = lines.pop(0).split(" ", 1)
508 last, lrev = lines.pop(0).split(" ", 1)
505 last, lrev = bin(last), int(lrev)
509 last, lrev = bin(last), int(lrev)
506 if lrev >= len(self) or self[lrev].node() != last:
510 if lrev >= len(self) or self[lrev].node() != last:
507 # invalidate the cache
511 # invalidate the cache
508 raise ValueError('invalidating branch cache (tip differs)')
512 raise ValueError('invalidating branch cache (tip differs)')
509 for l in lines:
513 for l in lines:
510 if not l:
514 if not l:
511 continue
515 continue
512 node, label = l.split(" ", 1)
516 node, label = l.split(" ", 1)
513 label = encoding.tolocal(label.strip())
517 label = encoding.tolocal(label.strip())
514 partial.setdefault(label, []).append(bin(node))
518 partial.setdefault(label, []).append(bin(node))
515 except KeyboardInterrupt:
519 except KeyboardInterrupt:
516 raise
520 raise
517 except Exception, inst:
521 except Exception, inst:
518 if self.ui.debugflag:
522 if self.ui.debugflag:
519 self.ui.warn(str(inst), '\n')
523 self.ui.warn(str(inst), '\n')
520 partial, last, lrev = {}, nullid, nullrev
524 partial, last, lrev = {}, nullid, nullrev
521 return partial, last, lrev
525 return partial, last, lrev
522
526
523 def _writebranchcache(self, branches, tip, tiprev):
527 def _writebranchcache(self, branches, tip, tiprev):
524 try:
528 try:
525 f = self.opener("cache/branchheads", "w", atomictemp=True)
529 f = self.opener("cache/branchheads", "w", atomictemp=True)
526 f.write("%s %s\n" % (hex(tip), tiprev))
530 f.write("%s %s\n" % (hex(tip), tiprev))
527 for label, nodes in branches.iteritems():
531 for label, nodes in branches.iteritems():
528 for node in nodes:
532 for node in nodes:
529 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
533 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
530 f.close()
534 f.close()
531 except (IOError, OSError):
535 except (IOError, OSError):
532 pass
536 pass
533
537
534 def _updatebranchcache(self, partial, ctxgen):
538 def _updatebranchcache(self, partial, ctxgen):
535 # collect new branch entries
539 # collect new branch entries
536 newbranches = {}
540 newbranches = {}
537 for c in ctxgen:
541 for c in ctxgen:
538 newbranches.setdefault(c.branch(), []).append(c.node())
542 newbranches.setdefault(c.branch(), []).append(c.node())
539 # if older branchheads are reachable from new ones, they aren't
543 # if older branchheads are reachable from new ones, they aren't
540 # really branchheads. Note checking parents is insufficient:
544 # really branchheads. Note checking parents is insufficient:
541 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
545 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
542 for branch, newnodes in newbranches.iteritems():
546 for branch, newnodes in newbranches.iteritems():
543 bheads = partial.setdefault(branch, [])
547 bheads = partial.setdefault(branch, [])
544 bheads.extend(newnodes)
548 bheads.extend(newnodes)
545 if len(bheads) <= 1:
549 if len(bheads) <= 1:
546 continue
550 continue
547 bheads = sorted(bheads, key=lambda x: self[x].rev())
551 bheads = sorted(bheads, key=lambda x: self[x].rev())
548 # starting from tip means fewer passes over reachable
552 # starting from tip means fewer passes over reachable
549 while newnodes:
553 while newnodes:
550 latest = newnodes.pop()
554 latest = newnodes.pop()
551 if latest not in bheads:
555 if latest not in bheads:
552 continue
556 continue
553 minbhrev = self[bheads[0]].node()
557 minbhrev = self[bheads[0]].node()
554 reachable = self.changelog.reachable(latest, minbhrev)
558 reachable = self.changelog.reachable(latest, minbhrev)
555 reachable.remove(latest)
559 reachable.remove(latest)
556 if reachable:
560 if reachable:
557 bheads = [b for b in bheads if b not in reachable]
561 bheads = [b for b in bheads if b not in reachable]
558 partial[branch] = bheads
562 partial[branch] = bheads
559
563
560 def lookup(self, key):
564 def lookup(self, key):
561 if isinstance(key, int):
565 if isinstance(key, int):
562 return self.changelog.node(key)
566 return self.changelog.node(key)
563 elif key == '.':
567 elif key == '.':
564 return self.dirstate.p1()
568 return self.dirstate.p1()
565 elif key == 'null':
569 elif key == 'null':
566 return nullid
570 return nullid
567 elif key == 'tip':
571 elif key == 'tip':
568 return self.changelog.tip()
572 return self.changelog.tip()
569 n = self.changelog._match(key)
573 n = self.changelog._match(key)
570 if n:
574 if n:
571 return n
575 return n
572 if key in self._bookmarks:
576 if key in self._bookmarks:
573 return self._bookmarks[key]
577 return self._bookmarks[key]
574 if key in self.tags():
578 if key in self.tags():
575 return self.tags()[key]
579 return self.tags()[key]
576 if key in self.branchtags():
580 if key in self.branchtags():
577 return self.branchtags()[key]
581 return self.branchtags()[key]
578 n = self.changelog._partialmatch(key)
582 n = self.changelog._partialmatch(key)
579 if n:
583 if n:
580 return n
584 return n
581
585
582 # can't find key, check if it might have come from damaged dirstate
586 # can't find key, check if it might have come from damaged dirstate
583 if key in self.dirstate.parents():
587 if key in self.dirstate.parents():
584 raise error.Abort(_("working directory has unknown parent '%s'!")
588 raise error.Abort(_("working directory has unknown parent '%s'!")
585 % short(key))
589 % short(key))
586 try:
590 try:
587 if len(key) == 20:
591 if len(key) == 20:
588 key = hex(key)
592 key = hex(key)
589 except TypeError:
593 except TypeError:
590 pass
594 pass
591 raise error.RepoLookupError(_("unknown revision '%s'") % key)
595 raise error.RepoLookupError(_("unknown revision '%s'") % key)
592
596
593 def lookupbranch(self, key, remote=None):
597 def lookupbranch(self, key, remote=None):
594 repo = remote or self
598 repo = remote or self
595 if key in repo.branchmap():
599 if key in repo.branchmap():
596 return key
600 return key
597
601
598 repo = (remote and remote.local()) and remote or self
602 repo = (remote and remote.local()) and remote or self
599 return repo[key].branch()
603 return repo[key].branch()
600
604
601 def known(self, nodes):
605 def known(self, nodes):
602 nm = self.changelog.nodemap
606 nm = self.changelog.nodemap
603 return [(n in nm) for n in nodes]
607 return [(n in nm) for n in nodes]
604
608
605 def local(self):
609 def local(self):
606 return self
610 return self
607
611
608 def join(self, f):
612 def join(self, f):
609 return os.path.join(self.path, f)
613 return os.path.join(self.path, f)
610
614
611 def wjoin(self, f):
615 def wjoin(self, f):
612 return os.path.join(self.root, f)
616 return os.path.join(self.root, f)
613
617
614 def file(self, f):
618 def file(self, f):
615 if f[0] == '/':
619 if f[0] == '/':
616 f = f[1:]
620 f = f[1:]
617 return filelog.filelog(self.sopener, f)
621 return filelog.filelog(self.sopener, f)
618
622
619 def changectx(self, changeid):
623 def changectx(self, changeid):
620 return self[changeid]
624 return self[changeid]
621
625
622 def parents(self, changeid=None):
626 def parents(self, changeid=None):
623 '''get list of changectxs for parents of changeid'''
627 '''get list of changectxs for parents of changeid'''
624 return self[changeid].parents()
628 return self[changeid].parents()
625
629
626 def filectx(self, path, changeid=None, fileid=None):
630 def filectx(self, path, changeid=None, fileid=None):
627 """changeid can be a changeset revision, node, or tag.
631 """changeid can be a changeset revision, node, or tag.
628 fileid can be a file revision or node."""
632 fileid can be a file revision or node."""
629 return context.filectx(self, path, changeid, fileid)
633 return context.filectx(self, path, changeid, fileid)
630
634
631 def getcwd(self):
635 def getcwd(self):
632 return self.dirstate.getcwd()
636 return self.dirstate.getcwd()
633
637
634 def pathto(self, f, cwd=None):
638 def pathto(self, f, cwd=None):
635 return self.dirstate.pathto(f, cwd)
639 return self.dirstate.pathto(f, cwd)
636
640
637 def wfile(self, f, mode='r'):
641 def wfile(self, f, mode='r'):
638 return self.wopener(f, mode)
642 return self.wopener(f, mode)
639
643
640 def _link(self, f):
644 def _link(self, f):
641 return os.path.islink(self.wjoin(f))
645 return os.path.islink(self.wjoin(f))
642
646
643 def _loadfilter(self, filter):
647 def _loadfilter(self, filter):
644 if filter not in self.filterpats:
648 if filter not in self.filterpats:
645 l = []
649 l = []
646 for pat, cmd in self.ui.configitems(filter):
650 for pat, cmd in self.ui.configitems(filter):
647 if cmd == '!':
651 if cmd == '!':
648 continue
652 continue
649 mf = matchmod.match(self.root, '', [pat])
653 mf = matchmod.match(self.root, '', [pat])
650 fn = None
654 fn = None
651 params = cmd
655 params = cmd
652 for name, filterfn in self._datafilters.iteritems():
656 for name, filterfn in self._datafilters.iteritems():
653 if cmd.startswith(name):
657 if cmd.startswith(name):
654 fn = filterfn
658 fn = filterfn
655 params = cmd[len(name):].lstrip()
659 params = cmd[len(name):].lstrip()
656 break
660 break
657 if not fn:
661 if not fn:
658 fn = lambda s, c, **kwargs: util.filter(s, c)
662 fn = lambda s, c, **kwargs: util.filter(s, c)
659 # Wrap old filters not supporting keyword arguments
663 # Wrap old filters not supporting keyword arguments
660 if not inspect.getargspec(fn)[2]:
664 if not inspect.getargspec(fn)[2]:
661 oldfn = fn
665 oldfn = fn
662 fn = lambda s, c, **kwargs: oldfn(s, c)
666 fn = lambda s, c, **kwargs: oldfn(s, c)
663 l.append((mf, fn, params))
667 l.append((mf, fn, params))
664 self.filterpats[filter] = l
668 self.filterpats[filter] = l
665 return self.filterpats[filter]
669 return self.filterpats[filter]
666
670
667 def _filter(self, filterpats, filename, data):
671 def _filter(self, filterpats, filename, data):
668 for mf, fn, cmd in filterpats:
672 for mf, fn, cmd in filterpats:
669 if mf(filename):
673 if mf(filename):
670 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
674 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
671 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
675 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
672 break
676 break
673
677
674 return data
678 return data
675
679
676 @propertycache
680 @propertycache
677 def _encodefilterpats(self):
681 def _encodefilterpats(self):
678 return self._loadfilter('encode')
682 return self._loadfilter('encode')
679
683
680 @propertycache
684 @propertycache
681 def _decodefilterpats(self):
685 def _decodefilterpats(self):
682 return self._loadfilter('decode')
686 return self._loadfilter('decode')
683
687
684 def adddatafilter(self, name, filter):
688 def adddatafilter(self, name, filter):
685 self._datafilters[name] = filter
689 self._datafilters[name] = filter
686
690
687 def wread(self, filename):
691 def wread(self, filename):
688 if self._link(filename):
692 if self._link(filename):
689 data = os.readlink(self.wjoin(filename))
693 data = os.readlink(self.wjoin(filename))
690 else:
694 else:
691 data = self.wopener.read(filename)
695 data = self.wopener.read(filename)
692 return self._filter(self._encodefilterpats, filename, data)
696 return self._filter(self._encodefilterpats, filename, data)
693
697
694 def wwrite(self, filename, data, flags):
698 def wwrite(self, filename, data, flags):
695 data = self._filter(self._decodefilterpats, filename, data)
699 data = self._filter(self._decodefilterpats, filename, data)
696 if 'l' in flags:
700 if 'l' in flags:
697 self.wopener.symlink(data, filename)
701 self.wopener.symlink(data, filename)
698 else:
702 else:
699 self.wopener.write(filename, data)
703 self.wopener.write(filename, data)
700 if 'x' in flags:
704 if 'x' in flags:
701 util.setflags(self.wjoin(filename), False, True)
705 util.setflags(self.wjoin(filename), False, True)
702
706
703 def wwritedata(self, filename, data):
707 def wwritedata(self, filename, data):
704 return self._filter(self._decodefilterpats, filename, data)
708 return self._filter(self._decodefilterpats, filename, data)
705
709
706 def transaction(self, desc):
710 def transaction(self, desc):
707 tr = self._transref and self._transref() or None
711 tr = self._transref and self._transref() or None
708 if tr and tr.running():
712 if tr and tr.running():
709 return tr.nest()
713 return tr.nest()
710
714
711 # abort here if the journal already exists
715 # abort here if the journal already exists
712 if os.path.exists(self.sjoin("journal")):
716 if os.path.exists(self.sjoin("journal")):
713 raise error.RepoError(
717 raise error.RepoError(
714 _("abandoned transaction found - run hg recover"))
718 _("abandoned transaction found - run hg recover"))
715
719
716 journalfiles = self._writejournal(desc)
720 journalfiles = self._writejournal(desc)
717 renames = [(x, undoname(x)) for x in journalfiles]
721 renames = [(x, undoname(x)) for x in journalfiles]
718
722
719 tr = transaction.transaction(self.ui.warn, self.sopener,
723 tr = transaction.transaction(self.ui.warn, self.sopener,
720 self.sjoin("journal"),
724 self.sjoin("journal"),
721 aftertrans(renames),
725 aftertrans(renames),
722 self.store.createmode)
726 self.store.createmode)
723 self._transref = weakref.ref(tr)
727 self._transref = weakref.ref(tr)
724 return tr
728 return tr
725
729
726 def _writejournal(self, desc):
730 def _writejournal(self, desc):
727 # save dirstate for rollback
731 # save dirstate for rollback
728 try:
732 try:
729 ds = self.opener.read("dirstate")
733 ds = self.opener.read("dirstate")
730 except IOError:
734 except IOError:
731 ds = ""
735 ds = ""
732 self.opener.write("journal.dirstate", ds)
736 self.opener.write("journal.dirstate", ds)
733 self.opener.write("journal.branch",
737 self.opener.write("journal.branch",
734 encoding.fromlocal(self.dirstate.branch()))
738 encoding.fromlocal(self.dirstate.branch()))
735 self.opener.write("journal.desc",
739 self.opener.write("journal.desc",
736 "%d\n%s\n" % (len(self), desc))
740 "%d\n%s\n" % (len(self), desc))
737
741
738 bkname = self.join('bookmarks')
742 bkname = self.join('bookmarks')
739 if os.path.exists(bkname):
743 if os.path.exists(bkname):
740 util.copyfile(bkname, self.join('journal.bookmarks'))
744 util.copyfile(bkname, self.join('journal.bookmarks'))
741 else:
745 else:
742 self.opener.write('journal.bookmarks', '')
746 self.opener.write('journal.bookmarks', '')
743
747
744 return (self.sjoin('journal'), self.join('journal.dirstate'),
748 return (self.sjoin('journal'), self.join('journal.dirstate'),
745 self.join('journal.branch'), self.join('journal.desc'),
749 self.join('journal.branch'), self.join('journal.desc'),
746 self.join('journal.bookmarks'))
750 self.join('journal.bookmarks'))
747
751
748 def recover(self):
752 def recover(self):
749 lock = self.lock()
753 lock = self.lock()
750 try:
754 try:
751 if os.path.exists(self.sjoin("journal")):
755 if os.path.exists(self.sjoin("journal")):
752 self.ui.status(_("rolling back interrupted transaction\n"))
756 self.ui.status(_("rolling back interrupted transaction\n"))
753 transaction.rollback(self.sopener, self.sjoin("journal"),
757 transaction.rollback(self.sopener, self.sjoin("journal"),
754 self.ui.warn)
758 self.ui.warn)
755 self.invalidate()
759 self.invalidate()
756 return True
760 return True
757 else:
761 else:
758 self.ui.warn(_("no interrupted transaction available\n"))
762 self.ui.warn(_("no interrupted transaction available\n"))
759 return False
763 return False
760 finally:
764 finally:
761 lock.release()
765 lock.release()
762
766
763 def rollback(self, dryrun=False, force=False):
767 def rollback(self, dryrun=False, force=False):
764 wlock = lock = None
768 wlock = lock = None
765 try:
769 try:
766 wlock = self.wlock()
770 wlock = self.wlock()
767 lock = self.lock()
771 lock = self.lock()
768 if os.path.exists(self.sjoin("undo")):
772 if os.path.exists(self.sjoin("undo")):
769 return self._rollback(dryrun, force)
773 return self._rollback(dryrun, force)
770 else:
774 else:
771 self.ui.warn(_("no rollback information available\n"))
775 self.ui.warn(_("no rollback information available\n"))
772 return 1
776 return 1
773 finally:
777 finally:
774 release(lock, wlock)
778 release(lock, wlock)
775
779
776 def _rollback(self, dryrun, force):
780 def _rollback(self, dryrun, force):
777 ui = self.ui
781 ui = self.ui
778 try:
782 try:
779 args = self.opener.read('undo.desc').splitlines()
783 args = self.opener.read('undo.desc').splitlines()
780 (oldlen, desc, detail) = (int(args[0]), args[1], None)
784 (oldlen, desc, detail) = (int(args[0]), args[1], None)
781 if len(args) >= 3:
785 if len(args) >= 3:
782 detail = args[2]
786 detail = args[2]
783 oldtip = oldlen - 1
787 oldtip = oldlen - 1
784
788
785 if detail and ui.verbose:
789 if detail and ui.verbose:
786 msg = (_('repository tip rolled back to revision %s'
790 msg = (_('repository tip rolled back to revision %s'
787 ' (undo %s: %s)\n')
791 ' (undo %s: %s)\n')
788 % (oldtip, desc, detail))
792 % (oldtip, desc, detail))
789 else:
793 else:
790 msg = (_('repository tip rolled back to revision %s'
794 msg = (_('repository tip rolled back to revision %s'
791 ' (undo %s)\n')
795 ' (undo %s)\n')
792 % (oldtip, desc))
796 % (oldtip, desc))
793 except IOError:
797 except IOError:
794 msg = _('rolling back unknown transaction\n')
798 msg = _('rolling back unknown transaction\n')
795 desc = None
799 desc = None
796
800
797 if not force and self['.'] != self['tip'] and desc == 'commit':
801 if not force and self['.'] != self['tip'] and desc == 'commit':
798 raise util.Abort(
802 raise util.Abort(
799 _('rollback of last commit while not checked out '
803 _('rollback of last commit while not checked out '
800 'may lose data'), hint=_('use -f to force'))
804 'may lose data'), hint=_('use -f to force'))
801
805
802 ui.status(msg)
806 ui.status(msg)
803 if dryrun:
807 if dryrun:
804 return 0
808 return 0
805
809
806 parents = self.dirstate.parents()
810 parents = self.dirstate.parents()
807 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
811 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
808 if os.path.exists(self.join('undo.bookmarks')):
812 if os.path.exists(self.join('undo.bookmarks')):
809 util.rename(self.join('undo.bookmarks'),
813 util.rename(self.join('undo.bookmarks'),
810 self.join('bookmarks'))
814 self.join('bookmarks'))
811 self.invalidate()
815 self.invalidate()
812
816
813 parentgone = (parents[0] not in self.changelog.nodemap or
817 parentgone = (parents[0] not in self.changelog.nodemap or
814 parents[1] not in self.changelog.nodemap)
818 parents[1] not in self.changelog.nodemap)
815 if parentgone:
819 if parentgone:
816 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
820 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
817 try:
821 try:
818 branch = self.opener.read('undo.branch')
822 branch = self.opener.read('undo.branch')
819 self.dirstate.setbranch(branch)
823 self.dirstate.setbranch(branch)
820 except IOError:
824 except IOError:
821 ui.warn(_('named branch could not be reset: '
825 ui.warn(_('named branch could not be reset: '
822 'current branch is still \'%s\'\n')
826 'current branch is still \'%s\'\n')
823 % self.dirstate.branch())
827 % self.dirstate.branch())
824
828
825 self.dirstate.invalidate()
829 self.dirstate.invalidate()
826 self.destroyed()
830 self.destroyed()
827 parents = tuple([p.rev() for p in self.parents()])
831 parents = tuple([p.rev() for p in self.parents()])
828 if len(parents) > 1:
832 if len(parents) > 1:
829 ui.status(_('working directory now based on '
833 ui.status(_('working directory now based on '
830 'revisions %d and %d\n') % parents)
834 'revisions %d and %d\n') % parents)
831 else:
835 else:
832 ui.status(_('working directory now based on '
836 ui.status(_('working directory now based on '
833 'revision %d\n') % parents)
837 'revision %d\n') % parents)
834 return 0
838 return 0
835
839
836 def invalidatecaches(self):
840 def invalidatecaches(self):
837 try:
841 try:
838 delattr(self, '_tagscache')
842 delattr(self, '_tagscache')
839 except AttributeError:
843 except AttributeError:
840 pass
844 pass
841
845
842 self._branchcache = None # in UTF-8
846 self._branchcache = None # in UTF-8
843 self._branchcachetip = None
847 self._branchcachetip = None
844
848
845 def invalidatedirstate(self):
849 def invalidatedirstate(self):
846 '''Invalidates the dirstate, causing the next call to dirstate
850 '''Invalidates the dirstate, causing the next call to dirstate
847 to check if it was modified since the last time it was read,
851 to check if it was modified since the last time it was read,
848 rereading it if it has.
852 rereading it if it has.
849
853
850 This is different to dirstate.invalidate() that it doesn't always
854 This is different to dirstate.invalidate() that it doesn't always
851 rereads the dirstate. Use dirstate.invalidate() if you want to
855 rereads the dirstate. Use dirstate.invalidate() if you want to
852 explicitly read the dirstate again (i.e. restoring it to a previous
856 explicitly read the dirstate again (i.e. restoring it to a previous
853 known good state).'''
857 known good state).'''
854 try:
858 try:
855 delattr(self, 'dirstate')
859 delattr(self, 'dirstate')
856 except AttributeError:
860 except AttributeError:
857 pass
861 pass
858
862
859 def invalidate(self):
863 def invalidate(self):
860 for k in self._filecache:
864 for k in self._filecache:
861 # dirstate is invalidated separately in invalidatedirstate()
865 # dirstate is invalidated separately in invalidatedirstate()
862 if k == 'dirstate':
866 if k == 'dirstate':
863 continue
867 continue
864
868
865 try:
869 try:
866 delattr(self, k)
870 delattr(self, k)
867 except AttributeError:
871 except AttributeError:
868 pass
872 pass
869 self.invalidatecaches()
873 self.invalidatecaches()
870
874
871 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
875 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
872 try:
876 try:
873 l = lock.lock(lockname, 0, releasefn, desc=desc)
877 l = lock.lock(lockname, 0, releasefn, desc=desc)
874 except error.LockHeld, inst:
878 except error.LockHeld, inst:
875 if not wait:
879 if not wait:
876 raise
880 raise
877 self.ui.warn(_("waiting for lock on %s held by %r\n") %
881 self.ui.warn(_("waiting for lock on %s held by %r\n") %
878 (desc, inst.locker))
882 (desc, inst.locker))
879 # default to 600 seconds timeout
883 # default to 600 seconds timeout
880 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
884 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
881 releasefn, desc=desc)
885 releasefn, desc=desc)
882 if acquirefn:
886 if acquirefn:
883 acquirefn()
887 acquirefn()
884 return l
888 return l
885
889
886 def lock(self, wait=True):
890 def lock(self, wait=True):
887 '''Lock the repository store (.hg/store) and return a weak reference
891 '''Lock the repository store (.hg/store) and return a weak reference
888 to the lock. Use this before modifying the store (e.g. committing or
892 to the lock. Use this before modifying the store (e.g. committing or
889 stripping). If you are opening a transaction, get a lock as well.)'''
893 stripping). If you are opening a transaction, get a lock as well.)'''
890 l = self._lockref and self._lockref()
894 l = self._lockref and self._lockref()
891 if l is not None and l.held:
895 if l is not None and l.held:
892 l.lock()
896 l.lock()
893 return l
897 return l
894
898
895 def unlock():
899 def unlock():
896 self.store.write()
900 self.store.write()
897 for k, ce in self._filecache.items():
901 for k, ce in self._filecache.items():
898 if k == 'dirstate':
902 if k == 'dirstate':
899 continue
903 continue
900 ce.refresh()
904 ce.refresh()
901
905
902 l = self._lock(self.sjoin("lock"), wait, unlock,
906 l = self._lock(self.sjoin("lock"), wait, unlock,
903 self.invalidate, _('repository %s') % self.origroot)
907 self.invalidate, _('repository %s') % self.origroot)
904 self._lockref = weakref.ref(l)
908 self._lockref = weakref.ref(l)
905 return l
909 return l
906
910
907 def wlock(self, wait=True):
911 def wlock(self, wait=True):
908 '''Lock the non-store parts of the repository (everything under
912 '''Lock the non-store parts of the repository (everything under
909 .hg except .hg/store) and return a weak reference to the lock.
913 .hg except .hg/store) and return a weak reference to the lock.
910 Use this before modifying files in .hg.'''
914 Use this before modifying files in .hg.'''
911 l = self._wlockref and self._wlockref()
915 l = self._wlockref and self._wlockref()
912 if l is not None and l.held:
916 if l is not None and l.held:
913 l.lock()
917 l.lock()
914 return l
918 return l
915
919
916 def unlock():
920 def unlock():
917 self.dirstate.write()
921 self.dirstate.write()
918 ce = self._filecache.get('dirstate')
922 ce = self._filecache.get('dirstate')
919 if ce:
923 if ce:
920 ce.refresh()
924 ce.refresh()
921
925
922 l = self._lock(self.join("wlock"), wait, unlock,
926 l = self._lock(self.join("wlock"), wait, unlock,
923 self.invalidatedirstate, _('working directory of %s') %
927 self.invalidatedirstate, _('working directory of %s') %
924 self.origroot)
928 self.origroot)
925 self._wlockref = weakref.ref(l)
929 self._wlockref = weakref.ref(l)
926 return l
930 return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
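
    # For a rename, the filelog revision created above carries the copy
    # source in its metadata rather than in fparent1, roughly (sketch):
    #
    #     meta = {'copy': cfname, 'copyrev': hex(crev)}
    #     fparent1, fparent2 = nullid, newfparent
    #
    # so readers encountering nullid as the first parent know to "look up
    # the copy data", as the comment above describes.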

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
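
    # A caller's-eye sketch of commit() (names illustrative):
    #
    #     node = repo.commit(text="fix frobnication", user="me <me@x.org>")
    #     if node is None:
    #         ...  # nothing changed, no commit was created
    #
    # precommit/pretxncommit hooks can veto the commit; the "commit" hook
    # only fires after the working-directory lock has been released.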

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
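
    # Note the resource nesting above: the store lock is taken before the
    # transaction is opened, and teardown runs in reverse order -- tr.close()
    # on success, then tr.release() and lock.release() in the finally block
    # (releasing an unclosed transaction aborts it).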

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
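
    # The return value is a 7-tuple of sorted filename lists, e.g.:
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(ignored=True, clean=True, unknown=True)
    #
    # ignored/unknown/clean stay empty unless the corresponding flag is set.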

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
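
    # between() samples each top->bottom chain at exponentially growing
    # distances from top: since l is appended whenever i == f and f doubles,
    # the collected nodes sit 1, 2, 4, 8, ... first-parent steps below top.
    # That logarithmic spacing is what lets the (old) discovery protocol
    # narrow down a common ancestor in few round trips.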

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
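
    # Interpreting the return value (per the docstring above), sketch:
    #
    #     ret = repo.push(remote)
    #     if ret == 0:
    #         ...  # HTTP error, or nothing to push
    #     elif ret == 1:
    #         ...  # pushed without changing the remote head count,
    #              # or outgoing changesets were refused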

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)
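
    # This is the server side of the 'getbundle' capability; pull() above
    # drives it from the client with
    #
    #     cg = remote.getbundle('pull', common=common, heads=heads or rheads)
    #
    # where `common` comes from discovery and, as the docstring notes, may
    # contain nodes unknown locally -- hence the nodemap filtering above.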

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
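
    # gengroup() is evaluated lazily: unbundle10 wraps it in a chunkbuffer,
    # so a caller streams the bundle with successive read() calls, e.g.
    # (sketch):
    #
    #     cg = repo._changegroupsubset(common, csets, heads, 'pull')
    #     while True:
    #         chunk = cg.read(4096)
    #         if not chunk:
    #             break
    #         ...  # write chunk to the wire or to a bundle file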

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1800
1804
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

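The integer contract spelled out in addchangegroup()'s docstring is compact enough to misread, so here is a hypothetical decoder (not part of Mercurial) that mirrors it for callers:

    def describeaddchangegroup(ret):
        # hypothetical helper mirroring the docstring above
        if ret == 0:
            return 'nothing changed (or no source)'
        if ret > 1:
            return '%d head(s) added' % (ret - 1)
        if ret < -1:
            return '%d head(s) removed' % (-1 - ret)
        return 'changesets added, head count unchanged'  # ret == 1
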
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

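For reference, the wire layout stream_in() consumes, reconstructed from the reads above: a decimal status line (0 ok, 1 forbidden, 2 remote lock failed), a '<total_files> <total_bytes>' line, then for each file a '<name>\0<size>' header followed by exactly <size> raw bytes. A toy parser under those assumptions (not Mercurial API):

    from StringIO import StringIO

    def parsestream(fp):
        # toy parser mirroring the reads stream_in() performs
        resp = int(fp.readline())      # 0 = ok, 1 = forbidden, 2 = lock failed
        if resp != 0:
            raise ValueError('server error code %d' % resp)
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        for _unused in xrange(total_files):
            name, size = fp.readline().split('\0', 1)
            yield name, fp.read(int(size))   # verbatim store file contents

    # one-file round trip: store file "data/foo.i" with 5 bytes of payload
    wire = '0\n' + '1 5\n' + 'data/foo.i' + '\0' + '5\n' + 'hello'
    print list(parsestream(StringIO(wire)))  # [('data/foo.i', 'hello')]
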
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

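The streamreqs branch above boils down to a set-difference test against the formats this class supports. A standalone illustration (capability strings invented for the example):

    supportedformats = set(['revlogv1', 'generaldelta'])

    def canstream(streamreqs):
        # True when every requirement the server would stream is one we support
        return not (set(streamreqs.split(',')) - supportedformats)

    assert canstream('revlogv1')
    assert canstream('revlogv1,generaldelta')
    assert not canstream('revlogv1,someformat')  # unknown format -> fall back to pull
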
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

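Both methods are thin hook wrappers around the dispatch in mercurial.pushkey. A hedged usage sketch, assuming a repo object and the standard 'bookmarks' namespace:

    # enumerate a pushkey namespace: bookmark name -> 40-char hex node
    marks = repo.listkeys('bookmarks')
    for name, node in sorted(marks.iteritems()):
        print '%s -> %s' % (name, node)

    # pushkey(namespace, key, old, new) is compare-and-swap shaped: the
    # namespace handler applies the update only if 'old' still matches.
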
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

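A small usage sketch for savecommitmessage() (hypothetical caller; the helper exists so a draft message can be recovered, e.g. after an aborted commit):

    # stash the draft so it survives an aborted commit
    msgpath = repo.savecommitmessage('WIP: refactor dirstate handling\n')
    # msgpath points at .hg/last-message.txt, relative to the working directory
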
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,11 +1,39 b''
# Mercurial phases support code
#
# Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#                Augie Fackler     <durin42@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import nullid, bin, hex

allphases = range(2)
trackedphases = allphases[1:]

def readroots(repo):
    """Read phase roots from disk"""
    roots = [set() for i in allphases]
    roots[0].add(nullid)
    try:
        f = repo.sopener('phaseroots')
        try:
            for line in f:
                phase, nh = line.strip().split()
                roots[int(phase)].add(bin(nh))
        finally:
            f.close()
    except IOError:
        pass # default values are enough
    return roots

def writeroots(repo):
    """Write phase roots to disk"""
    f = repo.sopener('phaseroots', 'w', atomictemp=True)
    try:
        for phase, roots in enumerate(repo._phaseroots):
            for h in roots:
                f.write('%i %s\n' % (phase, hex(h)))
    finally:
        f.close()
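
readroots() and writeroots() round-trip a line-oriented file, 'phaseroots' in the store (.hg/store/phaseroots), holding one '<phase> <hex-node>' pair per line. An illustrative sample with an invented node:

    # what writeroots() emits for a single phase-1 root:
    sample = '1 1d7e9f1a6c1b4a0d8e3f2b5c9a7d6e4f3a2b1c0d\n'

    # the inverse parse performed by readroots():
    phase, nh = sample.strip().split()
    assert int(phase) == 1    # index into the per-phase list of root sets
    assert len(nh) == 40      # hex of a 20-byte node; decoded with bin(nh)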