##// END OF EJS Templates
revlog: make addgroup return a list of the nodes contained in the added source...
Pierre-Yves David -
r15890:e234eda2 default
parent child Browse files
Show More
@@ -1,2277 +1,2277
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
class localrepository(repo.repository):
    # Wire-protocol capabilities advertised to peers.
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    # On-disk revlog storage formats this class understands.
    supportedformats = set(('revlogv1', 'generaldelta'))
    # Full set of repository requirements we can open (formats plus
    # store-layout features).
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
28
28
29 def __init__(self, baseui, path=None, create=False):
29 def __init__(self, baseui, path=None, create=False):
30 repo.repository.__init__(self)
30 repo.repository.__init__(self)
31 self.root = os.path.realpath(util.expandpath(path))
31 self.root = os.path.realpath(util.expandpath(path))
32 self.path = os.path.join(self.root, ".hg")
32 self.path = os.path.join(self.root, ".hg")
33 self.origroot = path
33 self.origroot = path
34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 self.opener = scmutil.opener(self.path)
35 self.opener = scmutil.opener(self.path)
36 self.wopener = scmutil.opener(self.root)
36 self.wopener = scmutil.opener(self.root)
37 self.baseui = baseui
37 self.baseui = baseui
38 self.ui = baseui.copy()
38 self.ui = baseui.copy()
39 self._dirtyphases = False
39 self._dirtyphases = False
40
40
41 try:
41 try:
42 self.ui.readconfig(self.join("hgrc"), self.root)
42 self.ui.readconfig(self.join("hgrc"), self.root)
43 extensions.loadall(self.ui)
43 extensions.loadall(self.ui)
44 except IOError:
44 except IOError:
45 pass
45 pass
46
46
47 if not os.path.isdir(self.path):
47 if not os.path.isdir(self.path):
48 if create:
48 if create:
49 if not os.path.exists(path):
49 if not os.path.exists(path):
50 util.makedirs(path)
50 util.makedirs(path)
51 util.makedir(self.path, notindexed=True)
51 util.makedir(self.path, notindexed=True)
52 requirements = ["revlogv1"]
52 requirements = ["revlogv1"]
53 if self.ui.configbool('format', 'usestore', True):
53 if self.ui.configbool('format', 'usestore', True):
54 os.mkdir(os.path.join(self.path, "store"))
54 os.mkdir(os.path.join(self.path, "store"))
55 requirements.append("store")
55 requirements.append("store")
56 if self.ui.configbool('format', 'usefncache', True):
56 if self.ui.configbool('format', 'usefncache', True):
57 requirements.append("fncache")
57 requirements.append("fncache")
58 if self.ui.configbool('format', 'dotencode', True):
58 if self.ui.configbool('format', 'dotencode', True):
59 requirements.append('dotencode')
59 requirements.append('dotencode')
60 # create an invalid changelog
60 # create an invalid changelog
61 self.opener.append(
61 self.opener.append(
62 "00changelog.i",
62 "00changelog.i",
63 '\0\0\0\2' # represents revlogv2
63 '\0\0\0\2' # represents revlogv2
64 ' dummy changelog to prevent using the old repo layout'
64 ' dummy changelog to prevent using the old repo layout'
65 )
65 )
66 if self.ui.configbool('format', 'generaldelta', False):
66 if self.ui.configbool('format', 'generaldelta', False):
67 requirements.append("generaldelta")
67 requirements.append("generaldelta")
68 requirements = set(requirements)
68 requirements = set(requirements)
69 else:
69 else:
70 raise error.RepoError(_("repository %s not found") % path)
70 raise error.RepoError(_("repository %s not found") % path)
71 elif create:
71 elif create:
72 raise error.RepoError(_("repository %s already exists") % path)
72 raise error.RepoError(_("repository %s already exists") % path)
73 else:
73 else:
74 try:
74 try:
75 requirements = scmutil.readrequires(self.opener, self.supported)
75 requirements = scmutil.readrequires(self.opener, self.supported)
76 except IOError, inst:
76 except IOError, inst:
77 if inst.errno != errno.ENOENT:
77 if inst.errno != errno.ENOENT:
78 raise
78 raise
79 requirements = set()
79 requirements = set()
80
80
81 self.sharedpath = self.path
81 self.sharedpath = self.path
82 try:
82 try:
83 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
83 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
84 if not os.path.exists(s):
84 if not os.path.exists(s):
85 raise error.RepoError(
85 raise error.RepoError(
86 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 _('.hg/sharedpath points to nonexistent directory %s') % s)
87 self.sharedpath = s
87 self.sharedpath = s
88 except IOError, inst:
88 except IOError, inst:
89 if inst.errno != errno.ENOENT:
89 if inst.errno != errno.ENOENT:
90 raise
90 raise
91
91
92 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
92 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
93 self.spath = self.store.path
93 self.spath = self.store.path
94 self.sopener = self.store.opener
94 self.sopener = self.store.opener
95 self.sjoin = self.store.join
95 self.sjoin = self.store.join
96 self.opener.createmode = self.store.createmode
96 self.opener.createmode = self.store.createmode
97 self._applyrequirements(requirements)
97 self._applyrequirements(requirements)
98 if create:
98 if create:
99 self._writerequirements()
99 self._writerequirements()
100
100
101
101
102 self._branchcache = None
102 self._branchcache = None
103 self._branchcachetip = None
103 self._branchcachetip = None
104 self.filterpats = {}
104 self.filterpats = {}
105 self._datafilters = {}
105 self._datafilters = {}
106 self._transref = self._lockref = self._wlockref = None
106 self._transref = self._lockref = self._wlockref = None
107
107
108 # A cache for various files under .hg/ that tracks file changes,
108 # A cache for various files under .hg/ that tracks file changes,
109 # (used by the filecache decorator)
109 # (used by the filecache decorator)
110 #
110 #
111 # Maps a property name to its util.filecacheentry
111 # Maps a property name to its util.filecacheentry
112 self._filecache = {}
112 self._filecache = {}
113
113
114 def _applyrequirements(self, requirements):
114 def _applyrequirements(self, requirements):
115 self.requirements = requirements
115 self.requirements = requirements
116 openerreqs = set(('revlogv1', 'generaldelta'))
116 openerreqs = set(('revlogv1', 'generaldelta'))
117 self.sopener.options = dict((r, 1) for r in requirements
117 self.sopener.options = dict((r, 1) for r in requirements
118 if r in openerreqs)
118 if r in openerreqs)
119
119
120 def _writerequirements(self):
120 def _writerequirements(self):
121 reqfile = self.opener("requires", "w")
121 reqfile = self.opener("requires", "w")
122 for r in self.requirements:
122 for r in self.requirements:
123 reqfile.write("%s\n" % r)
123 reqfile.write("%s\n" % r)
124 reqfile.close()
124 reqfile.close()
125
125
126 def _checknested(self, path):
126 def _checknested(self, path):
127 """Determine if path is a legal nested repository."""
127 """Determine if path is a legal nested repository."""
128 if not path.startswith(self.root):
128 if not path.startswith(self.root):
129 return False
129 return False
130 subpath = path[len(self.root) + 1:]
130 subpath = path[len(self.root) + 1:]
131 normsubpath = util.pconvert(subpath)
131 normsubpath = util.pconvert(subpath)
132
132
133 # XXX: Checking against the current working copy is wrong in
133 # XXX: Checking against the current working copy is wrong in
134 # the sense that it can reject things like
134 # the sense that it can reject things like
135 #
135 #
136 # $ hg cat -r 10 sub/x.txt
136 # $ hg cat -r 10 sub/x.txt
137 #
137 #
138 # if sub/ is no longer a subrepository in the working copy
138 # if sub/ is no longer a subrepository in the working copy
139 # parent revision.
139 # parent revision.
140 #
140 #
141 # However, it can of course also allow things that would have
141 # However, it can of course also allow things that would have
142 # been rejected before, such as the above cat command if sub/
142 # been rejected before, such as the above cat command if sub/
143 # is a subrepository now, but was a normal directory before.
143 # is a subrepository now, but was a normal directory before.
144 # The old path auditor would have rejected by mistake since it
144 # The old path auditor would have rejected by mistake since it
145 # panics when it sees sub/.hg/.
145 # panics when it sees sub/.hg/.
146 #
146 #
147 # All in all, checking against the working copy seems sensible
147 # All in all, checking against the working copy seems sensible
148 # since we want to prevent access to nested repositories on
148 # since we want to prevent access to nested repositories on
149 # the filesystem *now*.
149 # the filesystem *now*.
150 ctx = self[None]
150 ctx = self[None]
151 parts = util.splitpath(subpath)
151 parts = util.splitpath(subpath)
152 while parts:
152 while parts:
153 prefix = '/'.join(parts)
153 prefix = '/'.join(parts)
154 if prefix in ctx.substate:
154 if prefix in ctx.substate:
155 if prefix == normsubpath:
155 if prefix == normsubpath:
156 return True
156 return True
157 else:
157 else:
158 sub = ctx.sub(prefix)
158 sub = ctx.sub(prefix)
159 return sub.checknested(subpath[len(prefix) + 1:])
159 return sub.checknested(subpath[len(prefix) + 1:])
160 else:
160 else:
161 parts.pop()
161 parts.pop()
162 return False
162 return False
163
163
164 @filecache('bookmarks')
164 @filecache('bookmarks')
165 def _bookmarks(self):
165 def _bookmarks(self):
166 return bookmarks.read(self)
166 return bookmarks.read(self)
167
167
168 @filecache('bookmarks.current')
168 @filecache('bookmarks.current')
169 def _bookmarkcurrent(self):
169 def _bookmarkcurrent(self):
170 return bookmarks.readcurrent(self)
170 return bookmarks.readcurrent(self)
171
171
172 def _writebookmarks(self, marks):
172 def _writebookmarks(self, marks):
173 bookmarks.write(self)
173 bookmarks.write(self)
174
174
175 @filecache('phaseroots')
175 @filecache('phaseroots')
176 def _phaseroots(self):
176 def _phaseroots(self):
177 self._dirtyphases = False
177 self._dirtyphases = False
178 phaseroots = phases.readroots(self)
178 phaseroots = phases.readroots(self)
179 phases.filterunknown(self, phaseroots)
179 phases.filterunknown(self, phaseroots)
180 return phaseroots
180 return phaseroots
181
181
182 @propertycache
182 @propertycache
183 def _phaserev(self):
183 def _phaserev(self):
184 cache = [phases.public] * len(self)
184 cache = [phases.public] * len(self)
185 for phase in phases.trackedphases:
185 for phase in phases.trackedphases:
186 roots = map(self.changelog.rev, self._phaseroots[phase])
186 roots = map(self.changelog.rev, self._phaseroots[phase])
187 if roots:
187 if roots:
188 for rev in roots:
188 for rev in roots:
189 cache[rev] = phase
189 cache[rev] = phase
190 for rev in self.changelog.descendants(*roots):
190 for rev in self.changelog.descendants(*roots):
191 cache[rev] = phase
191 cache[rev] = phase
192 return cache
192 return cache
193
193
194 @filecache('00changelog.i', True)
194 @filecache('00changelog.i', True)
195 def changelog(self):
195 def changelog(self):
196 c = changelog.changelog(self.sopener)
196 c = changelog.changelog(self.sopener)
197 if 'HG_PENDING' in os.environ:
197 if 'HG_PENDING' in os.environ:
198 p = os.environ['HG_PENDING']
198 p = os.environ['HG_PENDING']
199 if p.startswith(self.root):
199 if p.startswith(self.root):
200 c.readpending('00changelog.i.a')
200 c.readpending('00changelog.i.a')
201 return c
201 return c
202
202
203 @filecache('00manifest.i', True)
203 @filecache('00manifest.i', True)
204 def manifest(self):
204 def manifest(self):
205 return manifest.manifest(self.sopener)
205 return manifest.manifest(self.sopener)
206
206
207 @filecache('dirstate')
207 @filecache('dirstate')
208 def dirstate(self):
208 def dirstate(self):
209 warned = [0]
209 warned = [0]
210 def validate(node):
210 def validate(node):
211 try:
211 try:
212 self.changelog.rev(node)
212 self.changelog.rev(node)
213 return node
213 return node
214 except error.LookupError:
214 except error.LookupError:
215 if not warned[0]:
215 if not warned[0]:
216 warned[0] = True
216 warned[0] = True
217 self.ui.warn(_("warning: ignoring unknown"
217 self.ui.warn(_("warning: ignoring unknown"
218 " working parent %s!\n") % short(node))
218 " working parent %s!\n") % short(node))
219 return nullid
219 return nullid
220
220
221 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
221 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
222
222
223 def __getitem__(self, changeid):
223 def __getitem__(self, changeid):
224 if changeid is None:
224 if changeid is None:
225 return context.workingctx(self)
225 return context.workingctx(self)
226 return context.changectx(self, changeid)
226 return context.changectx(self, changeid)
227
227
228 def __contains__(self, changeid):
228 def __contains__(self, changeid):
229 try:
229 try:
230 return bool(self.lookup(changeid))
230 return bool(self.lookup(changeid))
231 except error.RepoLookupError:
231 except error.RepoLookupError:
232 return False
232 return False
233
233
234 def __nonzero__(self):
234 def __nonzero__(self):
235 return True
235 return True
236
236
237 def __len__(self):
237 def __len__(self):
238 return len(self.changelog)
238 return len(self.changelog)
239
239
240 def __iter__(self):
240 def __iter__(self):
241 for i in xrange(len(self)):
241 for i in xrange(len(self)):
242 yield i
242 yield i
243
243
244 def revs(self, expr, *args):
244 def revs(self, expr, *args):
245 '''Return a list of revisions matching the given revset'''
245 '''Return a list of revisions matching the given revset'''
246 expr = revset.formatspec(expr, *args)
246 expr = revset.formatspec(expr, *args)
247 m = revset.match(None, expr)
247 m = revset.match(None, expr)
248 return [r for r in m(self, range(len(self)))]
248 return [r for r in m(self, range(len(self)))]
249
249
250 def set(self, expr, *args):
250 def set(self, expr, *args):
251 '''
251 '''
252 Yield a context for each matching revision, after doing arg
252 Yield a context for each matching revision, after doing arg
253 replacement via revset.formatspec
253 replacement via revset.formatspec
254 '''
254 '''
255 for r in self.revs(expr, *args):
255 for r in self.revs(expr, *args):
256 yield self[r]
256 yield self[r]
257
257
258 def url(self):
258 def url(self):
259 return 'file:' + self.root
259 return 'file:' + self.root
260
260
261 def hook(self, name, throw=False, **args):
261 def hook(self, name, throw=False, **args):
262 return hook.hook(self.ui, self, name, throw, **args)
262 return hook.hook(self.ui, self, name, throw, **args)
263
263
264 tag_disallowed = ':\r\n'
264 tag_disallowed = ':\r\n'
265
265
266 def _tag(self, names, node, message, local, user, date, extra={}):
266 def _tag(self, names, node, message, local, user, date, extra={}):
267 if isinstance(names, str):
267 if isinstance(names, str):
268 allchars = names
268 allchars = names
269 names = (names,)
269 names = (names,)
270 else:
270 else:
271 allchars = ''.join(names)
271 allchars = ''.join(names)
272 for c in self.tag_disallowed:
272 for c in self.tag_disallowed:
273 if c in allchars:
273 if c in allchars:
274 raise util.Abort(_('%r cannot be used in a tag name') % c)
274 raise util.Abort(_('%r cannot be used in a tag name') % c)
275
275
276 branches = self.branchmap()
276 branches = self.branchmap()
277 for name in names:
277 for name in names:
278 self.hook('pretag', throw=True, node=hex(node), tag=name,
278 self.hook('pretag', throw=True, node=hex(node), tag=name,
279 local=local)
279 local=local)
280 if name in branches:
280 if name in branches:
281 self.ui.warn(_("warning: tag %s conflicts with existing"
281 self.ui.warn(_("warning: tag %s conflicts with existing"
282 " branch name\n") % name)
282 " branch name\n") % name)
283
283
284 def writetags(fp, names, munge, prevtags):
284 def writetags(fp, names, munge, prevtags):
285 fp.seek(0, 2)
285 fp.seek(0, 2)
286 if prevtags and prevtags[-1] != '\n':
286 if prevtags and prevtags[-1] != '\n':
287 fp.write('\n')
287 fp.write('\n')
288 for name in names:
288 for name in names:
289 m = munge and munge(name) or name
289 m = munge and munge(name) or name
290 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
290 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
291 old = self.tags().get(name, nullid)
291 old = self.tags().get(name, nullid)
292 fp.write('%s %s\n' % (hex(old), m))
292 fp.write('%s %s\n' % (hex(old), m))
293 fp.write('%s %s\n' % (hex(node), m))
293 fp.write('%s %s\n' % (hex(node), m))
294 fp.close()
294 fp.close()
295
295
296 prevtags = ''
296 prevtags = ''
297 if local:
297 if local:
298 try:
298 try:
299 fp = self.opener('localtags', 'r+')
299 fp = self.opener('localtags', 'r+')
300 except IOError:
300 except IOError:
301 fp = self.opener('localtags', 'a')
301 fp = self.opener('localtags', 'a')
302 else:
302 else:
303 prevtags = fp.read()
303 prevtags = fp.read()
304
304
305 # local tags are stored in the current charset
305 # local tags are stored in the current charset
306 writetags(fp, names, None, prevtags)
306 writetags(fp, names, None, prevtags)
307 for name in names:
307 for name in names:
308 self.hook('tag', node=hex(node), tag=name, local=local)
308 self.hook('tag', node=hex(node), tag=name, local=local)
309 return
309 return
310
310
311 try:
311 try:
312 fp = self.wfile('.hgtags', 'rb+')
312 fp = self.wfile('.hgtags', 'rb+')
313 except IOError, e:
313 except IOError, e:
314 if e.errno != errno.ENOENT:
314 if e.errno != errno.ENOENT:
315 raise
315 raise
316 fp = self.wfile('.hgtags', 'ab')
316 fp = self.wfile('.hgtags', 'ab')
317 else:
317 else:
318 prevtags = fp.read()
318 prevtags = fp.read()
319
319
320 # committed tags are stored in UTF-8
320 # committed tags are stored in UTF-8
321 writetags(fp, names, encoding.fromlocal, prevtags)
321 writetags(fp, names, encoding.fromlocal, prevtags)
322
322
323 fp.close()
323 fp.close()
324
324
325 if '.hgtags' not in self.dirstate:
325 if '.hgtags' not in self.dirstate:
326 self[None].add(['.hgtags'])
326 self[None].add(['.hgtags'])
327
327
328 m = matchmod.exact(self.root, '', ['.hgtags'])
328 m = matchmod.exact(self.root, '', ['.hgtags'])
329 tagnode = self.commit(message, user, date, extra=extra, match=m)
329 tagnode = self.commit(message, user, date, extra=extra, match=m)
330
330
331 for name in names:
331 for name in names:
332 self.hook('tag', node=hex(node), tag=name, local=local)
332 self.hook('tag', node=hex(node), tag=name, local=local)
333
333
334 return tagnode
334 return tagnode
335
335
336 def tag(self, names, node, message, local, user, date):
336 def tag(self, names, node, message, local, user, date):
337 '''tag a revision with one or more symbolic names.
337 '''tag a revision with one or more symbolic names.
338
338
339 names is a list of strings or, when adding a single tag, names may be a
339 names is a list of strings or, when adding a single tag, names may be a
340 string.
340 string.
341
341
342 if local is True, the tags are stored in a per-repository file.
342 if local is True, the tags are stored in a per-repository file.
343 otherwise, they are stored in the .hgtags file, and a new
343 otherwise, they are stored in the .hgtags file, and a new
344 changeset is committed with the change.
344 changeset is committed with the change.
345
345
346 keyword arguments:
346 keyword arguments:
347
347
348 local: whether to store tags in non-version-controlled file
348 local: whether to store tags in non-version-controlled file
349 (default False)
349 (default False)
350
350
351 message: commit message to use if committing
351 message: commit message to use if committing
352
352
353 user: name of user to use if committing
353 user: name of user to use if committing
354
354
355 date: date tuple to use if committing'''
355 date: date tuple to use if committing'''
356
356
357 if not local:
357 if not local:
358 for x in self.status()[:5]:
358 for x in self.status()[:5]:
359 if '.hgtags' in x:
359 if '.hgtags' in x:
360 raise util.Abort(_('working copy of .hgtags is changed '
360 raise util.Abort(_('working copy of .hgtags is changed '
361 '(please commit .hgtags manually)'))
361 '(please commit .hgtags manually)'))
362
362
363 self.tags() # instantiate the cache
363 self.tags() # instantiate the cache
364 self._tag(names, node, message, local, user, date)
364 self._tag(names, node, message, local, user, date)
365
365
366 @propertycache
366 @propertycache
367 def _tagscache(self):
367 def _tagscache(self):
368 '''Returns a tagscache object that contains various tags related caches.'''
368 '''Returns a tagscache object that contains various tags related caches.'''
369
369
370 # This simplifies its cache management by having one decorated
370 # This simplifies its cache management by having one decorated
371 # function (this one) and the rest simply fetch things from it.
371 # function (this one) and the rest simply fetch things from it.
372 class tagscache(object):
372 class tagscache(object):
373 def __init__(self):
373 def __init__(self):
374 # These two define the set of tags for this repository. tags
374 # These two define the set of tags for this repository. tags
375 # maps tag name to node; tagtypes maps tag name to 'global' or
375 # maps tag name to node; tagtypes maps tag name to 'global' or
376 # 'local'. (Global tags are defined by .hgtags across all
376 # 'local'. (Global tags are defined by .hgtags across all
377 # heads, and local tags are defined in .hg/localtags.)
377 # heads, and local tags are defined in .hg/localtags.)
378 # They constitute the in-memory cache of tags.
378 # They constitute the in-memory cache of tags.
379 self.tags = self.tagtypes = None
379 self.tags = self.tagtypes = None
380
380
381 self.nodetagscache = self.tagslist = None
381 self.nodetagscache = self.tagslist = None
382
382
383 cache = tagscache()
383 cache = tagscache()
384 cache.tags, cache.tagtypes = self._findtags()
384 cache.tags, cache.tagtypes = self._findtags()
385
385
386 return cache
386 return cache
387
387
388 def tags(self):
388 def tags(self):
389 '''return a mapping of tag to node'''
389 '''return a mapping of tag to node'''
390 return self._tagscache.tags
390 return self._tagscache.tags
391
391
392 def _findtags(self):
392 def _findtags(self):
393 '''Do the hard work of finding tags. Return a pair of dicts
393 '''Do the hard work of finding tags. Return a pair of dicts
394 (tags, tagtypes) where tags maps tag name to node, and tagtypes
394 (tags, tagtypes) where tags maps tag name to node, and tagtypes
395 maps tag name to a string like \'global\' or \'local\'.
395 maps tag name to a string like \'global\' or \'local\'.
396 Subclasses or extensions are free to add their own tags, but
396 Subclasses or extensions are free to add their own tags, but
397 should be aware that the returned dicts will be retained for the
397 should be aware that the returned dicts will be retained for the
398 duration of the localrepo object.'''
398 duration of the localrepo object.'''
399
399
400 # XXX what tagtype should subclasses/extensions use? Currently
400 # XXX what tagtype should subclasses/extensions use? Currently
401 # mq and bookmarks add tags, but do not set the tagtype at all.
401 # mq and bookmarks add tags, but do not set the tagtype at all.
402 # Should each extension invent its own tag type? Should there
402 # Should each extension invent its own tag type? Should there
403 # be one tagtype for all such "virtual" tags? Or is the status
403 # be one tagtype for all such "virtual" tags? Or is the status
404 # quo fine?
404 # quo fine?
405
405
406 alltags = {} # map tag name to (node, hist)
406 alltags = {} # map tag name to (node, hist)
407 tagtypes = {}
407 tagtypes = {}
408
408
409 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
409 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
410 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
410 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
411
411
412 # Build the return dicts. Have to re-encode tag names because
412 # Build the return dicts. Have to re-encode tag names because
413 # the tags module always uses UTF-8 (in order not to lose info
413 # the tags module always uses UTF-8 (in order not to lose info
414 # writing to the cache), but the rest of Mercurial wants them in
414 # writing to the cache), but the rest of Mercurial wants them in
415 # local encoding.
415 # local encoding.
416 tags = {}
416 tags = {}
417 for (name, (node, hist)) in alltags.iteritems():
417 for (name, (node, hist)) in alltags.iteritems():
418 if node != nullid:
418 if node != nullid:
419 try:
419 try:
420 # ignore tags to unknown nodes
420 # ignore tags to unknown nodes
421 self.changelog.lookup(node)
421 self.changelog.lookup(node)
422 tags[encoding.tolocal(name)] = node
422 tags[encoding.tolocal(name)] = node
423 except error.LookupError:
423 except error.LookupError:
424 pass
424 pass
425 tags['tip'] = self.changelog.tip()
425 tags['tip'] = self.changelog.tip()
426 tagtypes = dict([(encoding.tolocal(name), value)
426 tagtypes = dict([(encoding.tolocal(name), value)
427 for (name, value) in tagtypes.iteritems()])
427 for (name, value) in tagtypes.iteritems()])
428 return (tags, tagtypes)
428 return (tags, tagtypes)
429
429
430 def tagtype(self, tagname):
430 def tagtype(self, tagname):
431 '''
431 '''
432 return the type of the given tag. result can be:
432 return the type of the given tag. result can be:
433
433
434 'local' : a local tag
434 'local' : a local tag
435 'global' : a global tag
435 'global' : a global tag
436 None : tag does not exist
436 None : tag does not exist
437 '''
437 '''
438
438
439 return self._tagscache.tagtypes.get(tagname)
439 return self._tagscache.tagtypes.get(tagname)
440
440
441 def tagslist(self):
441 def tagslist(self):
442 '''return a list of tags ordered by revision'''
442 '''return a list of tags ordered by revision'''
443 if not self._tagscache.tagslist:
443 if not self._tagscache.tagslist:
444 l = []
444 l = []
445 for t, n in self.tags().iteritems():
445 for t, n in self.tags().iteritems():
446 r = self.changelog.rev(n)
446 r = self.changelog.rev(n)
447 l.append((r, t, n))
447 l.append((r, t, n))
448 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
448 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
449
449
450 return self._tagscache.tagslist
450 return self._tagscache.tagslist
451
451
452 def nodetags(self, node):
452 def nodetags(self, node):
453 '''return the tags associated with a node'''
453 '''return the tags associated with a node'''
454 if not self._tagscache.nodetagscache:
454 if not self._tagscache.nodetagscache:
455 nodetagscache = {}
455 nodetagscache = {}
456 for t, n in self.tags().iteritems():
456 for t, n in self.tags().iteritems():
457 nodetagscache.setdefault(n, []).append(t)
457 nodetagscache.setdefault(n, []).append(t)
458 for tags in nodetagscache.itervalues():
458 for tags in nodetagscache.itervalues():
459 tags.sort()
459 tags.sort()
460 self._tagscache.nodetagscache = nodetagscache
460 self._tagscache.nodetagscache = nodetagscache
461 return self._tagscache.nodetagscache.get(node, [])
461 return self._tagscache.nodetagscache.get(node, [])
462
462
463 def nodebookmarks(self, node):
463 def nodebookmarks(self, node):
464 marks = []
464 marks = []
465 for bookmark, n in self._bookmarks.iteritems():
465 for bookmark, n in self._bookmarks.iteritems():
466 if n == node:
466 if n == node:
467 marks.append(bookmark)
467 marks.append(bookmark)
468 return sorted(marks)
468 return sorted(marks)
469
469
470 def _branchtags(self, partial, lrev):
470 def _branchtags(self, partial, lrev):
471 # TODO: rename this function?
471 # TODO: rename this function?
472 tiprev = len(self) - 1
472 tiprev = len(self) - 1
473 if lrev != tiprev:
473 if lrev != tiprev:
474 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
474 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
475 self._updatebranchcache(partial, ctxgen)
475 self._updatebranchcache(partial, ctxgen)
476 self._writebranchcache(partial, self.changelog.tip(), tiprev)
476 self._writebranchcache(partial, self.changelog.tip(), tiprev)
477
477
478 return partial
478 return partial
479
479
480 def updatebranchcache(self):
480 def updatebranchcache(self):
481 tip = self.changelog.tip()
481 tip = self.changelog.tip()
482 if self._branchcache is not None and self._branchcachetip == tip:
482 if self._branchcache is not None and self._branchcachetip == tip:
483 return
483 return
484
484
485 oldtip = self._branchcachetip
485 oldtip = self._branchcachetip
486 self._branchcachetip = tip
486 self._branchcachetip = tip
487 if oldtip is None or oldtip not in self.changelog.nodemap:
487 if oldtip is None or oldtip not in self.changelog.nodemap:
488 partial, last, lrev = self._readbranchcache()
488 partial, last, lrev = self._readbranchcache()
489 else:
489 else:
490 lrev = self.changelog.rev(oldtip)
490 lrev = self.changelog.rev(oldtip)
491 partial = self._branchcache
491 partial = self._branchcache
492
492
493 self._branchtags(partial, lrev)
493 self._branchtags(partial, lrev)
494 # this private cache holds all heads (not just tips)
494 # this private cache holds all heads (not just tips)
495 self._branchcache = partial
495 self._branchcache = partial
496
496
497 def branchmap(self):
497 def branchmap(self):
498 '''returns a dictionary {branch: [branchheads]}'''
498 '''returns a dictionary {branch: [branchheads]}'''
499 self.updatebranchcache()
499 self.updatebranchcache()
500 return self._branchcache
500 return self._branchcache
501
501
502 def branchtags(self):
502 def branchtags(self):
503 '''return a dict where branch names map to the tipmost head of
503 '''return a dict where branch names map to the tipmost head of
504 the branch, open heads come before closed'''
504 the branch, open heads come before closed'''
505 bt = {}
505 bt = {}
506 for bn, heads in self.branchmap().iteritems():
506 for bn, heads in self.branchmap().iteritems():
507 tip = heads[-1]
507 tip = heads[-1]
508 for h in reversed(heads):
508 for h in reversed(heads):
509 if 'close' not in self.changelog.read(h)[5]:
509 if 'close' not in self.changelog.read(h)[5]:
510 tip = h
510 tip = h
511 break
511 break
512 bt[bn] = tip
512 bt[bn] = tip
513 return bt
513 return bt
514
514
    def _readbranchcache(self):
        """Read the branch-head cache from .hg/cache/branchheads.

        Returns a (partial, last, lrev) tuple where partial maps branch
        names to lists of head nodes, and last/lrev are the tip node and
        revision the cache was written for.  On any problem (missing or
        corrupt file, tip mismatch) an empty cache is returned instead
        of failing -- the cache is a pure optimization.
        """
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            # no cache file (or unreadable): start from scratch
            return {}, nullid, nullrev

        try:
            # first line records "<tiphex> <tiprev>" the cache was valid for
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: one "<nodehex> <branchname>" per head
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is not fatal: report in debug mode, rebuild
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
543
543
    def _writebranchcache(self, branches, tip, tiprev):
        """Persist the branch-head cache to .hg/cache/branchheads.

        The file starts with "<tiphex> <tiprev>" followed by one
        "<nodehex> <branchname>" line per head (the format read back by
        _readbranchcache).  Write failures are deliberately ignored: the
        cache will simply be rebuilt on demand.
        """
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            # best effort only
            pass
554
554
    def _updatebranchcache(self, partial, ctxgen):
        """Fold the changesets from ctxgen into the branchmap `partial`.

        partial maps branch name -> list of head nodes and is updated in
        place.  Existing entries that become reachable from the newly
        added nodes are dropped, since they are no longer heads.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                # zero or one head: nothing can be demoted
                continue
            # sort candidate heads by revision so bheads[0] is the oldest
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    # anything reachable from `latest` is not a head
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
580
580
    def lookup(self, key):
        """Resolve key to a changelog node.

        key may be a revision number, one of the symbolic names '.',
        'null' or 'tip', a full node, a bookmark, a tag, a branch name
        or a node prefix -- tried in exactly that order.  Raises
        error.Abort when key is a dirstate parent missing from the
        changelog, and error.RepoLookupError when nothing matches.
        """
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        # last resort: partial node-hex match
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            # show binary nodes as hex in the error message
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
613
613
614 def lookupbranch(self, key, remote=None):
614 def lookupbranch(self, key, remote=None):
615 repo = remote or self
615 repo = remote or self
616 if key in repo.branchmap():
616 if key in repo.branchmap():
617 return key
617 return key
618
618
619 repo = (remote and remote.local()) and remote or self
619 repo = (remote and remote.local()) and remote or self
620 return repo[key].branch()
620 return repo[key].branch()
621
621
622 def known(self, nodes):
622 def known(self, nodes):
623 nm = self.changelog.nodemap
623 nm = self.changelog.nodemap
624 result = []
624 result = []
625 for n in nodes:
625 for n in nodes:
626 r = nm.get(n)
626 r = nm.get(n)
627 resp = not (r is None or self._phaserev[r] >= phases.secret)
627 resp = not (r is None or self._phaserev[r] >= phases.secret)
628 result.append(resp)
628 result.append(resp)
629 return result
629 return result
630
630
631 def local(self):
631 def local(self):
632 return self
632 return self
633
633
634 def cancopy(self):
634 def cancopy(self):
635 return (repo.repository.cancopy(self)
635 return (repo.repository.cancopy(self)
636 and not self._phaseroots[phases.secret])
636 and not self._phaseroots[phases.secret])
637
637
638 def join(self, f):
638 def join(self, f):
639 return os.path.join(self.path, f)
639 return os.path.join(self.path, f)
640
640
641 def wjoin(self, f):
641 def wjoin(self, f):
642 return os.path.join(self.root, f)
642 return os.path.join(self.root, f)
643
643
644 def file(self, f):
644 def file(self, f):
645 if f[0] == '/':
645 if f[0] == '/':
646 f = f[1:]
646 f = f[1:]
647 return filelog.filelog(self.sopener, f)
647 return filelog.filelog(self.sopener, f)
648
648
649 def changectx(self, changeid):
649 def changectx(self, changeid):
650 return self[changeid]
650 return self[changeid]
651
651
652 def parents(self, changeid=None):
652 def parents(self, changeid=None):
653 '''get list of changectxs for parents of changeid'''
653 '''get list of changectxs for parents of changeid'''
654 return self[changeid].parents()
654 return self[changeid].parents()
655
655
656 def filectx(self, path, changeid=None, fileid=None):
656 def filectx(self, path, changeid=None, fileid=None):
657 """changeid can be a changeset revision, node, or tag.
657 """changeid can be a changeset revision, node, or tag.
658 fileid can be a file revision or node."""
658 fileid can be a file revision or node."""
659 return context.filectx(self, path, changeid, fileid)
659 return context.filectx(self, path, changeid, fileid)
660
660
661 def getcwd(self):
661 def getcwd(self):
662 return self.dirstate.getcwd()
662 return self.dirstate.getcwd()
663
663
664 def pathto(self, f, cwd=None):
664 def pathto(self, f, cwd=None):
665 return self.dirstate.pathto(f, cwd)
665 return self.dirstate.pathto(f, cwd)
666
666
667 def wfile(self, f, mode='r'):
667 def wfile(self, f, mode='r'):
668 return self.wopener(f, mode)
668 return self.wopener(f, mode)
669
669
670 def _link(self, f):
670 def _link(self, f):
671 return os.path.islink(self.wjoin(f))
671 return os.path.islink(self.wjoin(f))
672
672
673 def _loadfilter(self, filter):
673 def _loadfilter(self, filter):
674 if filter not in self.filterpats:
674 if filter not in self.filterpats:
675 l = []
675 l = []
676 for pat, cmd in self.ui.configitems(filter):
676 for pat, cmd in self.ui.configitems(filter):
677 if cmd == '!':
677 if cmd == '!':
678 continue
678 continue
679 mf = matchmod.match(self.root, '', [pat])
679 mf = matchmod.match(self.root, '', [pat])
680 fn = None
680 fn = None
681 params = cmd
681 params = cmd
682 for name, filterfn in self._datafilters.iteritems():
682 for name, filterfn in self._datafilters.iteritems():
683 if cmd.startswith(name):
683 if cmd.startswith(name):
684 fn = filterfn
684 fn = filterfn
685 params = cmd[len(name):].lstrip()
685 params = cmd[len(name):].lstrip()
686 break
686 break
687 if not fn:
687 if not fn:
688 fn = lambda s, c, **kwargs: util.filter(s, c)
688 fn = lambda s, c, **kwargs: util.filter(s, c)
689 # Wrap old filters not supporting keyword arguments
689 # Wrap old filters not supporting keyword arguments
690 if not inspect.getargspec(fn)[2]:
690 if not inspect.getargspec(fn)[2]:
691 oldfn = fn
691 oldfn = fn
692 fn = lambda s, c, **kwargs: oldfn(s, c)
692 fn = lambda s, c, **kwargs: oldfn(s, c)
693 l.append((mf, fn, params))
693 l.append((mf, fn, params))
694 self.filterpats[filter] = l
694 self.filterpats[filter] = l
695 return self.filterpats[filter]
695 return self.filterpats[filter]
696
696
697 def _filter(self, filterpats, filename, data):
697 def _filter(self, filterpats, filename, data):
698 for mf, fn, cmd in filterpats:
698 for mf, fn, cmd in filterpats:
699 if mf(filename):
699 if mf(filename):
700 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
700 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
701 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
701 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
702 break
702 break
703
703
704 return data
704 return data
705
705
    @propertycache
    def _encodefilterpats(self):
        # lazily-loaded [encode] filter patterns, cached on the instance
        return self._loadfilter('encode')
709
709
    @propertycache
    def _decodefilterpats(self):
        # lazily-loaded [decode] filter patterns, cached on the instance
        return self._loadfilter('decode')
713
713
714 def adddatafilter(self, name, filter):
714 def adddatafilter(self, name, filter):
715 self._datafilters[name] = filter
715 self._datafilters[name] = filter
716
716
717 def wread(self, filename):
717 def wread(self, filename):
718 if self._link(filename):
718 if self._link(filename):
719 data = os.readlink(self.wjoin(filename))
719 data = os.readlink(self.wjoin(filename))
720 else:
720 else:
721 data = self.wopener.read(filename)
721 data = self.wopener.read(filename)
722 return self._filter(self._encodefilterpats, filename, data)
722 return self._filter(self._encodefilterpats, filename, data)
723
723
724 def wwrite(self, filename, data, flags):
724 def wwrite(self, filename, data, flags):
725 data = self._filter(self._decodefilterpats, filename, data)
725 data = self._filter(self._decodefilterpats, filename, data)
726 if 'l' in flags:
726 if 'l' in flags:
727 self.wopener.symlink(data, filename)
727 self.wopener.symlink(data, filename)
728 else:
728 else:
729 self.wopener.write(filename, data)
729 self.wopener.write(filename, data)
730 if 'x' in flags:
730 if 'x' in flags:
731 util.setflags(self.wjoin(filename), False, True)
731 util.setflags(self.wjoin(filename), False, True)
732
732
733 def wwritedata(self, filename, data):
733 def wwritedata(self, filename, data):
734 return self._filter(self._decodefilterpats, filename, data)
734 return self._filter(self._decodefilterpats, filename, data)
735
735
    def transaction(self, desc):
        """Open (or nest into) a store transaction described by desc.

        If a transaction is already running, a nested transaction is
        returned.  Otherwise journal files are written first so the
        transaction can be rolled back, and a fresh transaction object
        is returned.  Only a weak reference is kept here, so the caller
        must hold the returned object alive for the transaction's
        duration.
        """
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # on success the journal files are renamed to their undo.* names
        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
755
755
    def _writejournal(self, desc):
        """Snapshot the state needed to roll back the coming transaction.

        Writes journal.* copies of the dirstate, the current branch, a
        "<len>\\n<desc>\\n" description, the bookmarks file and the phase
        roots.  Returns the tuple of journal file names (the journal
        itself first) for renaming to undo.* after the transaction.
        """
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            # no dirstate yet (fresh repo): journal an empty one
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        # bookmarks and phaseroots may not exist yet; journal an empty
        # file so rollback restores "nothing" in that case
        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')
        phasesname = self.sjoin('phaseroots')
        if os.path.exists(phasesname):
            util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
        else:
            self.sopener.write('journal.phaseroots', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))
783
783
    def recover(self):
        """Roll back an interrupted (abandoned) transaction, if any.

        Returns True when a journal was found and rolled back, False
        when there was nothing to recover.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # caches may describe state that no longer exists
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
798
798
799 def rollback(self, dryrun=False, force=False):
799 def rollback(self, dryrun=False, force=False):
800 wlock = lock = None
800 wlock = lock = None
801 try:
801 try:
802 wlock = self.wlock()
802 wlock = self.wlock()
803 lock = self.lock()
803 lock = self.lock()
804 if os.path.exists(self.sjoin("undo")):
804 if os.path.exists(self.sjoin("undo")):
805 return self._rollback(dryrun, force)
805 return self._rollback(dryrun, force)
806 else:
806 else:
807 self.ui.warn(_("no rollback information available\n"))
807 self.ui.warn(_("no rollback information available\n"))
808 return 1
808 return 1
809 finally:
809 finally:
810 release(lock, wlock)
810 release(lock, wlock)
811
811
    def _rollback(self, dryrun, force):
        """Undo the last transaction (the undo.* files must exist).

        Refuses to roll back a 'commit' transaction when the working
        directory is not based on tip, unless force is given.  Restores
        bookmarks, phase roots and -- when the working directory's
        parents were removed -- the saved dirstate and branch.  Returns
        0 on success or dry run.
        """
        ui = self.ui
        try:
            # undo.desc records "<oldlen>\n<command>\n[<detail>]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # pre-desc undo data: we don't know what is being undone
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        # remember the wd parents before the changelog shrinks
        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # only restore the saved dirstate/branch when rollback actually
        # removed the working directory's parents from history
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        self.destroyed()
        return 0
874
874
    def invalidatecaches(self):
        """Forget cached tag and branch data so it is recomputed on demand."""
        try:
            # _tagscache is a lazily-computed property; deleting the
            # instance attribute forces recomputation on next access
            delattr(self, '_tagscache')
        except AttributeError:
            # never computed -- nothing to drop
            pass

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
883
883
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you
        want to explicitly read the dirstate again (i.e. restoring it to
        a previous known good state).'''
        try:
            # dirstate is a filecache property: deleting the attribute
            # makes the next access revalidate against the on-disk file
            delattr(self, 'dirstate')
        except AttributeError:
            # dirstate was never loaded -- nothing to invalidate
            pass
897
897
    def invalidate(self):
        """Drop all cached per-file state so it is reloaded on next access.

        Also clears the derived tag/branch caches via invalidatecaches().
        The dirstate is intentionally left alone (see
        invalidatedirstate()).
        """
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                # property was never computed -- nothing to drop
                pass
        self.invalidatecaches()
909
909
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file `lockname` and return the lock object.

        First attempts a non-blocking acquire.  If the lock is held by
        someone else and wait is true, warns about the holder and
        retries with the configured ui.timeout (600 seconds default);
        if wait is false the LockHeld error propagates.  acquirefn, when
        given, runs after the lock is obtained; releasefn runs when it
        is released.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
924
924
925 def _afterlock(self, callback):
925 def _afterlock(self, callback):
926 """add a callback to the current repository lock.
926 """add a callback to the current repository lock.
927
927
928 The callback will be executed on lock release."""
928 The callback will be executed on lock release."""
929 l = self._lockref and self._lockref()
929 l = self._lockref and self._lockref()
930 if l:
930 if l:
931 l.postrelease.append(callback)
931 l.postrelease.append(callback)
932
932
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # already locked by us: bump the hold count instead
            l.lock()
            return l

        def unlock():
            # flush pending store data and dirty phase roots, then
            # refresh the stat info of cached files (except dirstate,
            # which is handled by wlock) so our own writes are not
            # later mistaken for external modifications
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
955
955
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # already locked by us: bump the hold count instead
            l.lock()
            return l

        def unlock():
            # flush the dirstate and refresh its cache entry so our own
            # write is not later mistaken for an external modification
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
976
976
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the file context to commit; manifest1/manifest2 are the
        manifests of the commit's parents; linkrev is the changelog
        revision the new filelog entry will link to.  Appends the file
        name to changelist when the file content (or, on a merge, just
        its flags) changed, and returns the filelog node to record in
        the new manifest.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1056
1056
1057 def commit(self, text="", user=None, date=None, match=None, force=False,
1057 def commit(self, text="", user=None, date=None, match=None, force=False,
1058 editor=False, extra={}):
1058 editor=False, extra={}):
1059 """Add a new revision to current repository.
1059 """Add a new revision to current repository.
1060
1060
1061 Revision information is gathered from the working directory,
1061 Revision information is gathered from the working directory,
1062 match can be used to filter the committed files. If editor is
1062 match can be used to filter the committed files. If editor is
1063 supplied, it is called to get a commit message.
1063 supplied, it is called to get a commit message.
1064 """
1064 """
1065
1065
1066 def fail(f, msg):
1066 def fail(f, msg):
1067 raise util.Abort('%s: %s' % (f, msg))
1067 raise util.Abort('%s: %s' % (f, msg))
1068
1068
1069 if not match:
1069 if not match:
1070 match = matchmod.always(self.root, '')
1070 match = matchmod.always(self.root, '')
1071
1071
1072 if not force:
1072 if not force:
1073 vdirs = []
1073 vdirs = []
1074 match.dir = vdirs.append
1074 match.dir = vdirs.append
1075 match.bad = fail
1075 match.bad = fail
1076
1076
1077 wlock = self.wlock()
1077 wlock = self.wlock()
1078 try:
1078 try:
1079 wctx = self[None]
1079 wctx = self[None]
1080 merge = len(wctx.parents()) > 1
1080 merge = len(wctx.parents()) > 1
1081
1081
1082 if (not force and merge and match and
1082 if (not force and merge and match and
1083 (match.files() or match.anypats())):
1083 (match.files() or match.anypats())):
1084 raise util.Abort(_('cannot partially commit a merge '
1084 raise util.Abort(_('cannot partially commit a merge '
1085 '(do not specify files or patterns)'))
1085 '(do not specify files or patterns)'))
1086
1086
1087 changes = self.status(match=match, clean=force)
1087 changes = self.status(match=match, clean=force)
1088 if force:
1088 if force:
1089 changes[0].extend(changes[6]) # mq may commit unchanged files
1089 changes[0].extend(changes[6]) # mq may commit unchanged files
1090
1090
1091 # check subrepos
1091 # check subrepos
1092 subs = []
1092 subs = []
1093 removedsubs = set()
1093 removedsubs = set()
1094 if '.hgsub' in wctx:
1094 if '.hgsub' in wctx:
1095 # only manage subrepos and .hgsubstate if .hgsub is present
1095 # only manage subrepos and .hgsubstate if .hgsub is present
1096 for p in wctx.parents():
1096 for p in wctx.parents():
1097 removedsubs.update(s for s in p.substate if match(s))
1097 removedsubs.update(s for s in p.substate if match(s))
1098 for s in wctx.substate:
1098 for s in wctx.substate:
1099 removedsubs.discard(s)
1099 removedsubs.discard(s)
1100 if match(s) and wctx.sub(s).dirty():
1100 if match(s) and wctx.sub(s).dirty():
1101 subs.append(s)
1101 subs.append(s)
1102 if (subs or removedsubs):
1102 if (subs or removedsubs):
1103 if (not match('.hgsub') and
1103 if (not match('.hgsub') and
1104 '.hgsub' in (wctx.modified() + wctx.added())):
1104 '.hgsub' in (wctx.modified() + wctx.added())):
1105 raise util.Abort(
1105 raise util.Abort(
1106 _("can't commit subrepos without .hgsub"))
1106 _("can't commit subrepos without .hgsub"))
1107 if '.hgsubstate' not in changes[0]:
1107 if '.hgsubstate' not in changes[0]:
1108 changes[0].insert(0, '.hgsubstate')
1108 changes[0].insert(0, '.hgsubstate')
1109 if '.hgsubstate' in changes[2]:
1109 if '.hgsubstate' in changes[2]:
1110 changes[2].remove('.hgsubstate')
1110 changes[2].remove('.hgsubstate')
1111 elif '.hgsub' in changes[2]:
1111 elif '.hgsub' in changes[2]:
1112 # clean up .hgsubstate when .hgsub is removed
1112 # clean up .hgsubstate when .hgsub is removed
1113 if ('.hgsubstate' in wctx and
1113 if ('.hgsubstate' in wctx and
1114 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1114 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1115 changes[2].insert(0, '.hgsubstate')
1115 changes[2].insert(0, '.hgsubstate')
1116
1116
1117 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1117 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1118 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1118 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1119 if changedsubs:
1119 if changedsubs:
1120 raise util.Abort(_("uncommitted changes in subrepo %s")
1120 raise util.Abort(_("uncommitted changes in subrepo %s")
1121 % changedsubs[0],
1121 % changedsubs[0],
1122 hint=_("use --subrepos for recursive commit"))
1122 hint=_("use --subrepos for recursive commit"))
1123
1123
1124 # make sure all explicit patterns are matched
1124 # make sure all explicit patterns are matched
1125 if not force and match.files():
1125 if not force and match.files():
1126 matched = set(changes[0] + changes[1] + changes[2])
1126 matched = set(changes[0] + changes[1] + changes[2])
1127
1127
1128 for f in match.files():
1128 for f in match.files():
1129 if f == '.' or f in matched or f in wctx.substate:
1129 if f == '.' or f in matched or f in wctx.substate:
1130 continue
1130 continue
1131 if f in changes[3]: # missing
1131 if f in changes[3]: # missing
1132 fail(f, _('file not found!'))
1132 fail(f, _('file not found!'))
1133 if f in vdirs: # visited directory
1133 if f in vdirs: # visited directory
1134 d = f + '/'
1134 d = f + '/'
1135 for mf in matched:
1135 for mf in matched:
1136 if mf.startswith(d):
1136 if mf.startswith(d):
1137 break
1137 break
1138 else:
1138 else:
1139 fail(f, _("no match under directory!"))
1139 fail(f, _("no match under directory!"))
1140 elif f not in self.dirstate:
1140 elif f not in self.dirstate:
1141 fail(f, _("file not tracked!"))
1141 fail(f, _("file not tracked!"))
1142
1142
1143 if (not force and not extra.get("close") and not merge
1143 if (not force and not extra.get("close") and not merge
1144 and not (changes[0] or changes[1] or changes[2])
1144 and not (changes[0] or changes[1] or changes[2])
1145 and wctx.branch() == wctx.p1().branch()):
1145 and wctx.branch() == wctx.p1().branch()):
1146 return None
1146 return None
1147
1147
1148 ms = mergemod.mergestate(self)
1148 ms = mergemod.mergestate(self)
1149 for f in changes[0]:
1149 for f in changes[0]:
1150 if f in ms and ms[f] == 'u':
1150 if f in ms and ms[f] == 'u':
1151 raise util.Abort(_("unresolved merge conflicts "
1151 raise util.Abort(_("unresolved merge conflicts "
1152 "(see hg help resolve)"))
1152 "(see hg help resolve)"))
1153
1153
1154 cctx = context.workingctx(self, text, user, date, extra, changes)
1154 cctx = context.workingctx(self, text, user, date, extra, changes)
1155 if editor:
1155 if editor:
1156 cctx._text = editor(self, cctx, subs)
1156 cctx._text = editor(self, cctx, subs)
1157 edited = (text != cctx._text)
1157 edited = (text != cctx._text)
1158
1158
1159 # commit subs
1159 # commit subs
1160 if subs or removedsubs:
1160 if subs or removedsubs:
1161 state = wctx.substate.copy()
1161 state = wctx.substate.copy()
1162 for s in sorted(subs):
1162 for s in sorted(subs):
1163 sub = wctx.sub(s)
1163 sub = wctx.sub(s)
1164 self.ui.status(_('committing subrepository %s\n') %
1164 self.ui.status(_('committing subrepository %s\n') %
1165 subrepo.subrelpath(sub))
1165 subrepo.subrelpath(sub))
1166 sr = sub.commit(cctx._text, user, date)
1166 sr = sub.commit(cctx._text, user, date)
1167 state[s] = (state[s][0], sr)
1167 state[s] = (state[s][0], sr)
1168 subrepo.writestate(self, state)
1168 subrepo.writestate(self, state)
1169
1169
1170 # Save commit message in case this transaction gets rolled back
1170 # Save commit message in case this transaction gets rolled back
1171 # (e.g. by a pretxncommit hook). Leave the content alone on
1171 # (e.g. by a pretxncommit hook). Leave the content alone on
1172 # the assumption that the user will use the same editor again.
1172 # the assumption that the user will use the same editor again.
1173 msgfn = self.savecommitmessage(cctx._text)
1173 msgfn = self.savecommitmessage(cctx._text)
1174
1174
1175 p1, p2 = self.dirstate.parents()
1175 p1, p2 = self.dirstate.parents()
1176 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1176 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1177 try:
1177 try:
1178 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1178 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1179 ret = self.commitctx(cctx, True)
1179 ret = self.commitctx(cctx, True)
1180 except:
1180 except:
1181 if edited:
1181 if edited:
1182 self.ui.write(
1182 self.ui.write(
1183 _('note: commit message saved in %s\n') % msgfn)
1183 _('note: commit message saved in %s\n') % msgfn)
1184 raise
1184 raise
1185
1185
1186 # update bookmarks, dirstate and mergestate
1186 # update bookmarks, dirstate and mergestate
1187 bookmarks.update(self, p1, ret)
1187 bookmarks.update(self, p1, ret)
1188 for f in changes[0] + changes[1]:
1188 for f in changes[0] + changes[1]:
1189 self.dirstate.normal(f)
1189 self.dirstate.normal(f)
1190 for f in changes[2]:
1190 for f in changes[2]:
1191 self.dirstate.drop(f)
1191 self.dirstate.drop(f)
1192 self.dirstate.setparents(ret)
1192 self.dirstate.setparents(ret)
1193 ms.reset()
1193 ms.reset()
1194 finally:
1194 finally:
1195 wlock.release()
1195 wlock.release()
1196
1196
1197 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1197 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1198 return ret
1198 return ret
1199
1199
1200 def commitctx(self, ctx, error=False):
1200 def commitctx(self, ctx, error=False):
1201 """Add a new revision to current repository.
1201 """Add a new revision to current repository.
1202 Revision information is passed via the context argument.
1202 Revision information is passed via the context argument.
1203 """
1203 """
1204
1204
1205 tr = lock = None
1205 tr = lock = None
1206 removed = list(ctx.removed())
1206 removed = list(ctx.removed())
1207 p1, p2 = ctx.p1(), ctx.p2()
1207 p1, p2 = ctx.p1(), ctx.p2()
1208 user = ctx.user()
1208 user = ctx.user()
1209
1209
1210 lock = self.lock()
1210 lock = self.lock()
1211 try:
1211 try:
1212 tr = self.transaction("commit")
1212 tr = self.transaction("commit")
1213 trp = weakref.proxy(tr)
1213 trp = weakref.proxy(tr)
1214
1214
1215 if ctx.files():
1215 if ctx.files():
1216 m1 = p1.manifest().copy()
1216 m1 = p1.manifest().copy()
1217 m2 = p2.manifest()
1217 m2 = p2.manifest()
1218
1218
1219 # check in files
1219 # check in files
1220 new = {}
1220 new = {}
1221 changed = []
1221 changed = []
1222 linkrev = len(self)
1222 linkrev = len(self)
1223 for f in sorted(ctx.modified() + ctx.added()):
1223 for f in sorted(ctx.modified() + ctx.added()):
1224 self.ui.note(f + "\n")
1224 self.ui.note(f + "\n")
1225 try:
1225 try:
1226 fctx = ctx[f]
1226 fctx = ctx[f]
1227 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1227 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1228 changed)
1228 changed)
1229 m1.set(f, fctx.flags())
1229 m1.set(f, fctx.flags())
1230 except OSError, inst:
1230 except OSError, inst:
1231 self.ui.warn(_("trouble committing %s!\n") % f)
1231 self.ui.warn(_("trouble committing %s!\n") % f)
1232 raise
1232 raise
1233 except IOError, inst:
1233 except IOError, inst:
1234 errcode = getattr(inst, 'errno', errno.ENOENT)
1234 errcode = getattr(inst, 'errno', errno.ENOENT)
1235 if error or errcode and errcode != errno.ENOENT:
1235 if error or errcode and errcode != errno.ENOENT:
1236 self.ui.warn(_("trouble committing %s!\n") % f)
1236 self.ui.warn(_("trouble committing %s!\n") % f)
1237 raise
1237 raise
1238 else:
1238 else:
1239 removed.append(f)
1239 removed.append(f)
1240
1240
1241 # update manifest
1241 # update manifest
1242 m1.update(new)
1242 m1.update(new)
1243 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1243 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1244 drop = [f for f in removed if f in m1]
1244 drop = [f for f in removed if f in m1]
1245 for f in drop:
1245 for f in drop:
1246 del m1[f]
1246 del m1[f]
1247 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1247 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1248 p2.manifestnode(), (new, drop))
1248 p2.manifestnode(), (new, drop))
1249 files = changed + removed
1249 files = changed + removed
1250 else:
1250 else:
1251 mn = p1.manifestnode()
1251 mn = p1.manifestnode()
1252 files = []
1252 files = []
1253
1253
1254 # update changelog
1254 # update changelog
1255 self.changelog.delayupdate()
1255 self.changelog.delayupdate()
1256 n = self.changelog.add(mn, files, ctx.description(),
1256 n = self.changelog.add(mn, files, ctx.description(),
1257 trp, p1.node(), p2.node(),
1257 trp, p1.node(), p2.node(),
1258 user, ctx.date(), ctx.extra().copy())
1258 user, ctx.date(), ctx.extra().copy())
1259 p = lambda: self.changelog.writepending() and self.root or ""
1259 p = lambda: self.changelog.writepending() and self.root or ""
1260 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1260 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1261 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1261 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1262 parent2=xp2, pending=p)
1262 parent2=xp2, pending=p)
1263 self.changelog.finalize(trp)
1263 self.changelog.finalize(trp)
1264 # set the new commit is proper phase
1264 # set the new commit is proper phase
1265 targetphase = self.ui.configint('phases', 'new-commit',
1265 targetphase = self.ui.configint('phases', 'new-commit',
1266 phases.draft)
1266 phases.draft)
1267 if targetphase:
1267 if targetphase:
1268 # retract boundary do not alter parent changeset.
1268 # retract boundary do not alter parent changeset.
1269 # if a parent have higher the resulting phase will
1269 # if a parent have higher the resulting phase will
1270 # be compliant anyway
1270 # be compliant anyway
1271 #
1271 #
1272 # if minimal phase was 0 we don't need to retract anything
1272 # if minimal phase was 0 we don't need to retract anything
1273 phases.retractboundary(self, targetphase, [n])
1273 phases.retractboundary(self, targetphase, [n])
1274 tr.close()
1274 tr.close()
1275 self.updatebranchcache()
1275 self.updatebranchcache()
1276 return n
1276 return n
1277 finally:
1277 finally:
1278 if tr:
1278 if tr:
1279 tr.release()
1279 tr.release()
1280 lock.release()
1280 lock.release()
1281
1281
1282 def destroyed(self):
1282 def destroyed(self):
1283 '''Inform the repository that nodes have been destroyed.
1283 '''Inform the repository that nodes have been destroyed.
1284 Intended for use by strip and rollback, so there's a common
1284 Intended for use by strip and rollback, so there's a common
1285 place for anything that has to be done after destroying history.'''
1285 place for anything that has to be done after destroying history.'''
1286 # XXX it might be nice if we could take the list of destroyed
1286 # XXX it might be nice if we could take the list of destroyed
1287 # nodes, but I don't see an easy way for rollback() to do that
1287 # nodes, but I don't see an easy way for rollback() to do that
1288
1288
1289 # Ensure the persistent tag cache is updated. Doing it now
1289 # Ensure the persistent tag cache is updated. Doing it now
1290 # means that the tag cache only has to worry about destroyed
1290 # means that the tag cache only has to worry about destroyed
1291 # heads immediately after a strip/rollback. That in turn
1291 # heads immediately after a strip/rollback. That in turn
1292 # guarantees that "cachetip == currenttip" (comparing both rev
1292 # guarantees that "cachetip == currenttip" (comparing both rev
1293 # and node) always means no nodes have been added or destroyed.
1293 # and node) always means no nodes have been added or destroyed.
1294
1294
1295 # XXX this is suboptimal when qrefresh'ing: we strip the current
1295 # XXX this is suboptimal when qrefresh'ing: we strip the current
1296 # head, refresh the tag cache, then immediately add a new head.
1296 # head, refresh the tag cache, then immediately add a new head.
1297 # But I think doing it this way is necessary for the "instant
1297 # But I think doing it this way is necessary for the "instant
1298 # tag cache retrieval" case to work.
1298 # tag cache retrieval" case to work.
1299 self.invalidatecaches()
1299 self.invalidatecaches()
1300
1300
1301 def walk(self, match, node=None):
1301 def walk(self, match, node=None):
1302 '''
1302 '''
1303 walk recursively through the directory tree or a given
1303 walk recursively through the directory tree or a given
1304 changeset, finding all files matched by the match
1304 changeset, finding all files matched by the match
1305 function
1305 function
1306 '''
1306 '''
1307 return self[node].walk(match)
1307 return self[node].walk(match)
1308
1308
1309 def status(self, node1='.', node2=None, match=None,
1309 def status(self, node1='.', node2=None, match=None,
1310 ignored=False, clean=False, unknown=False,
1310 ignored=False, clean=False, unknown=False,
1311 listsubrepos=False):
1311 listsubrepos=False):
1312 """return status of files between two nodes or node and working directory
1312 """return status of files between two nodes or node and working directory
1313
1313
1314 If node1 is None, use the first dirstate parent instead.
1314 If node1 is None, use the first dirstate parent instead.
1315 If node2 is None, compare node1 with working directory.
1315 If node2 is None, compare node1 with working directory.
1316 """
1316 """
1317
1317
1318 def mfmatches(ctx):
1318 def mfmatches(ctx):
1319 mf = ctx.manifest().copy()
1319 mf = ctx.manifest().copy()
1320 for fn in mf.keys():
1320 for fn in mf.keys():
1321 if not match(fn):
1321 if not match(fn):
1322 del mf[fn]
1322 del mf[fn]
1323 return mf
1323 return mf
1324
1324
1325 if isinstance(node1, context.changectx):
1325 if isinstance(node1, context.changectx):
1326 ctx1 = node1
1326 ctx1 = node1
1327 else:
1327 else:
1328 ctx1 = self[node1]
1328 ctx1 = self[node1]
1329 if isinstance(node2, context.changectx):
1329 if isinstance(node2, context.changectx):
1330 ctx2 = node2
1330 ctx2 = node2
1331 else:
1331 else:
1332 ctx2 = self[node2]
1332 ctx2 = self[node2]
1333
1333
1334 working = ctx2.rev() is None
1334 working = ctx2.rev() is None
1335 parentworking = working and ctx1 == self['.']
1335 parentworking = working and ctx1 == self['.']
1336 match = match or matchmod.always(self.root, self.getcwd())
1336 match = match or matchmod.always(self.root, self.getcwd())
1337 listignored, listclean, listunknown = ignored, clean, unknown
1337 listignored, listclean, listunknown = ignored, clean, unknown
1338
1338
1339 # load earliest manifest first for caching reasons
1339 # load earliest manifest first for caching reasons
1340 if not working and ctx2.rev() < ctx1.rev():
1340 if not working and ctx2.rev() < ctx1.rev():
1341 ctx2.manifest()
1341 ctx2.manifest()
1342
1342
1343 if not parentworking:
1343 if not parentworking:
1344 def bad(f, msg):
1344 def bad(f, msg):
1345 if f not in ctx1:
1345 if f not in ctx1:
1346 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1346 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1347 match.bad = bad
1347 match.bad = bad
1348
1348
1349 if working: # we need to scan the working dir
1349 if working: # we need to scan the working dir
1350 subrepos = []
1350 subrepos = []
1351 if '.hgsub' in self.dirstate:
1351 if '.hgsub' in self.dirstate:
1352 subrepos = ctx2.substate.keys()
1352 subrepos = ctx2.substate.keys()
1353 s = self.dirstate.status(match, subrepos, listignored,
1353 s = self.dirstate.status(match, subrepos, listignored,
1354 listclean, listunknown)
1354 listclean, listunknown)
1355 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1355 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1356
1356
1357 # check for any possibly clean files
1357 # check for any possibly clean files
1358 if parentworking and cmp:
1358 if parentworking and cmp:
1359 fixup = []
1359 fixup = []
1360 # do a full compare of any files that might have changed
1360 # do a full compare of any files that might have changed
1361 for f in sorted(cmp):
1361 for f in sorted(cmp):
1362 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1362 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1363 or ctx1[f].cmp(ctx2[f])):
1363 or ctx1[f].cmp(ctx2[f])):
1364 modified.append(f)
1364 modified.append(f)
1365 else:
1365 else:
1366 fixup.append(f)
1366 fixup.append(f)
1367
1367
1368 # update dirstate for files that are actually clean
1368 # update dirstate for files that are actually clean
1369 if fixup:
1369 if fixup:
1370 if listclean:
1370 if listclean:
1371 clean += fixup
1371 clean += fixup
1372
1372
1373 try:
1373 try:
1374 # updating the dirstate is optional
1374 # updating the dirstate is optional
1375 # so we don't wait on the lock
1375 # so we don't wait on the lock
1376 wlock = self.wlock(False)
1376 wlock = self.wlock(False)
1377 try:
1377 try:
1378 for f in fixup:
1378 for f in fixup:
1379 self.dirstate.normal(f)
1379 self.dirstate.normal(f)
1380 finally:
1380 finally:
1381 wlock.release()
1381 wlock.release()
1382 except error.LockError:
1382 except error.LockError:
1383 pass
1383 pass
1384
1384
1385 if not parentworking:
1385 if not parentworking:
1386 mf1 = mfmatches(ctx1)
1386 mf1 = mfmatches(ctx1)
1387 if working:
1387 if working:
1388 # we are comparing working dir against non-parent
1388 # we are comparing working dir against non-parent
1389 # generate a pseudo-manifest for the working dir
1389 # generate a pseudo-manifest for the working dir
1390 mf2 = mfmatches(self['.'])
1390 mf2 = mfmatches(self['.'])
1391 for f in cmp + modified + added:
1391 for f in cmp + modified + added:
1392 mf2[f] = None
1392 mf2[f] = None
1393 mf2.set(f, ctx2.flags(f))
1393 mf2.set(f, ctx2.flags(f))
1394 for f in removed:
1394 for f in removed:
1395 if f in mf2:
1395 if f in mf2:
1396 del mf2[f]
1396 del mf2[f]
1397 else:
1397 else:
1398 # we are comparing two revisions
1398 # we are comparing two revisions
1399 deleted, unknown, ignored = [], [], []
1399 deleted, unknown, ignored = [], [], []
1400 mf2 = mfmatches(ctx2)
1400 mf2 = mfmatches(ctx2)
1401
1401
1402 modified, added, clean = [], [], []
1402 modified, added, clean = [], [], []
1403 for fn in mf2:
1403 for fn in mf2:
1404 if fn in mf1:
1404 if fn in mf1:
1405 if (fn not in deleted and
1405 if (fn not in deleted and
1406 (mf1.flags(fn) != mf2.flags(fn) or
1406 (mf1.flags(fn) != mf2.flags(fn) or
1407 (mf1[fn] != mf2[fn] and
1407 (mf1[fn] != mf2[fn] and
1408 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1408 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1409 modified.append(fn)
1409 modified.append(fn)
1410 elif listclean:
1410 elif listclean:
1411 clean.append(fn)
1411 clean.append(fn)
1412 del mf1[fn]
1412 del mf1[fn]
1413 elif fn not in deleted:
1413 elif fn not in deleted:
1414 added.append(fn)
1414 added.append(fn)
1415 removed = mf1.keys()
1415 removed = mf1.keys()
1416
1416
1417 if working and modified and not self.dirstate._checklink:
1417 if working and modified and not self.dirstate._checklink:
1418 # Symlink placeholders may get non-symlink-like contents
1418 # Symlink placeholders may get non-symlink-like contents
1419 # via user error or dereferencing by NFS or Samba servers,
1419 # via user error or dereferencing by NFS or Samba servers,
1420 # so we filter out any placeholders that don't look like a
1420 # so we filter out any placeholders that don't look like a
1421 # symlink
1421 # symlink
1422 sane = []
1422 sane = []
1423 for f in modified:
1423 for f in modified:
1424 if ctx2.flags(f) == 'l':
1424 if ctx2.flags(f) == 'l':
1425 d = ctx2[f].data()
1425 d = ctx2[f].data()
1426 if len(d) >= 1024 or '\n' in d or util.binary(d):
1426 if len(d) >= 1024 or '\n' in d or util.binary(d):
1427 self.ui.debug('ignoring suspect symlink placeholder'
1427 self.ui.debug('ignoring suspect symlink placeholder'
1428 ' "%s"\n' % f)
1428 ' "%s"\n' % f)
1429 continue
1429 continue
1430 sane.append(f)
1430 sane.append(f)
1431 modified = sane
1431 modified = sane
1432
1432
1433 r = modified, added, removed, deleted, unknown, ignored, clean
1433 r = modified, added, removed, deleted, unknown, ignored, clean
1434
1434
1435 if listsubrepos:
1435 if listsubrepos:
1436 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1436 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1437 if working:
1437 if working:
1438 rev2 = None
1438 rev2 = None
1439 else:
1439 else:
1440 rev2 = ctx2.substate[subpath][1]
1440 rev2 = ctx2.substate[subpath][1]
1441 try:
1441 try:
1442 submatch = matchmod.narrowmatcher(subpath, match)
1442 submatch = matchmod.narrowmatcher(subpath, match)
1443 s = sub.status(rev2, match=submatch, ignored=listignored,
1443 s = sub.status(rev2, match=submatch, ignored=listignored,
1444 clean=listclean, unknown=listunknown,
1444 clean=listclean, unknown=listunknown,
1445 listsubrepos=True)
1445 listsubrepos=True)
1446 for rfiles, sfiles in zip(r, s):
1446 for rfiles, sfiles in zip(r, s):
1447 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1447 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1448 except error.LookupError:
1448 except error.LookupError:
1449 self.ui.status(_("skipping missing subrepository: %s\n")
1449 self.ui.status(_("skipping missing subrepository: %s\n")
1450 % subpath)
1450 % subpath)
1451
1451
1452 for l in r:
1452 for l in r:
1453 l.sort()
1453 l.sort()
1454 return r
1454 return r
1455
1455
1456 def heads(self, start=None):
1456 def heads(self, start=None):
1457 heads = self.changelog.heads(start)
1457 heads = self.changelog.heads(start)
1458 # sort the output in rev descending order
1458 # sort the output in rev descending order
1459 return sorted(heads, key=self.changelog.rev, reverse=True)
1459 return sorted(heads, key=self.changelog.rev, reverse=True)
1460
1460
1461 def branchheads(self, branch=None, start=None, closed=False):
1461 def branchheads(self, branch=None, start=None, closed=False):
1462 '''return a (possibly filtered) list of heads for the given branch
1462 '''return a (possibly filtered) list of heads for the given branch
1463
1463
1464 Heads are returned in topological order, from newest to oldest.
1464 Heads are returned in topological order, from newest to oldest.
1465 If branch is None, use the dirstate branch.
1465 If branch is None, use the dirstate branch.
1466 If start is not None, return only heads reachable from start.
1466 If start is not None, return only heads reachable from start.
1467 If closed is True, return heads that are marked as closed as well.
1467 If closed is True, return heads that are marked as closed as well.
1468 '''
1468 '''
1469 if branch is None:
1469 if branch is None:
1470 branch = self[None].branch()
1470 branch = self[None].branch()
1471 branches = self.branchmap()
1471 branches = self.branchmap()
1472 if branch not in branches:
1472 if branch not in branches:
1473 return []
1473 return []
1474 # the cache returns heads ordered lowest to highest
1474 # the cache returns heads ordered lowest to highest
1475 bheads = list(reversed(branches[branch]))
1475 bheads = list(reversed(branches[branch]))
1476 if start is not None:
1476 if start is not None:
1477 # filter out the heads that cannot be reached from startrev
1477 # filter out the heads that cannot be reached from startrev
1478 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1478 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1479 bheads = [h for h in bheads if h in fbheads]
1479 bheads = [h for h in bheads if h in fbheads]
1480 if not closed:
1480 if not closed:
1481 bheads = [h for h in bheads if
1481 bheads = [h for h in bheads if
1482 ('close' not in self.changelog.read(h)[5])]
1482 ('close' not in self.changelog.read(h)[5])]
1483 return bheads
1483 return bheads
1484
1484
1485 def branches(self, nodes):
1485 def branches(self, nodes):
1486 if not nodes:
1486 if not nodes:
1487 nodes = [self.changelog.tip()]
1487 nodes = [self.changelog.tip()]
1488 b = []
1488 b = []
1489 for n in nodes:
1489 for n in nodes:
1490 t = n
1490 t = n
1491 while True:
1491 while True:
1492 p = self.changelog.parents(n)
1492 p = self.changelog.parents(n)
1493 if p[1] != nullid or p[0] == nullid:
1493 if p[1] != nullid or p[0] == nullid:
1494 b.append((t, n, p[0], p[1]))
1494 b.append((t, n, p[0], p[1]))
1495 break
1495 break
1496 n = p[0]
1496 n = p[0]
1497 return b
1497 return b
1498
1498
1499 def between(self, pairs):
1499 def between(self, pairs):
1500 r = []
1500 r = []
1501
1501
1502 for top, bottom in pairs:
1502 for top, bottom in pairs:
1503 n, l, i = top, [], 0
1503 n, l, i = top, [], 0
1504 f = 1
1504 f = 1
1505
1505
1506 while n != bottom and n != nullid:
1506 while n != bottom and n != nullid:
1507 p = self.changelog.parents(n)[0]
1507 p = self.changelog.parents(n)[0]
1508 if i == f:
1508 if i == f:
1509 l.append(n)
1509 l.append(n)
1510 f = f * 2
1510 f = f * 2
1511 n = p
1511 n = p
1512 i += 1
1512 i += 1
1513
1513
1514 r.append(l)
1514 r.append(l)
1515
1515
1516 return r
1516 return r
1517
1517
    def pull(self, remote, heads=None, force=False):
        """Pull changesets (and phase information) from a remote repository.

        remote is a repository peer; heads, when given, restricts the pull
        to the ancestors of those remote heads; force is forwarded to the
        discovery step.

        Returns the integer produced by addchangegroup() (0 when there was
        nothing to fetch).
        """
        # everything below runs under the local repo lock: we both add
        # changesets and move phase boundaries
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    # preferred protocol: request exactly the missing part
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                # remember which changelog revisions this pull added so
                # their phases can be adjusted below
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing: honor its per-node phase
                # roots for everything we have in common plus what we added
                subset = common + added
                rheads, rroots = phases.analyzeremotephases(self, subset,
                                                            remotephases)
                for phase, boundary in enumerate(rheads):
                    phases.advanceboundary(self, phase, boundary)
            else:
                # Remote is old or publishing: all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, common + added)
        finally:
            lock.release()

        return result
1570
1570
1571 def checkpush(self, force, revs):
1571 def checkpush(self, force, revs):
1572 """Extensions can override this function if additional checks have
1572 """Extensions can override this function if additional checks have
1573 to be performed before pushing, or call it if they override push
1573 to be performed before pushing, or call it if they override push
1574 command.
1574 command.
1575 """
1575 """
1576 pass
1576 pass
1577
1577
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            # get local lock as we might write phase data
            locallock = self.lock()
            try:
                cg, remote_heads, fut = discovery.prepush(self, remote, force,
                                                          revs, newbranch)
                # default return value when nothing is transferred; the
                # branch below overrides it with the transfer result
                ret = remote_heads
                if cg is not None:
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remote_heads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remote_heads, 'push')
                    else:
                        # we return an integer indicating remote head count change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, fut)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, fut, remotephases)
                    rheads, rroots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, fut)
                    else: # publish = False
                        for phase, rpheads in enumerate(rheads):
                            phases.advanceboundary(self, phase, rpheads)
                    ### Apply local phase on remote
                    #
                    # XXX If push failed we should use strict common and not
                    # future to avoid pushing phase data on unknown changesets.
                    # This is to be done later.

                    # elements we want to push: (oldphase, newphase, ctx)
                    topush = []

                    # store details of known remote phase of several revisions
                    # /!\ set of index I holds rev where: I <= rev.phase()
                    # /!\ public phase (index 0) is ignored
                    remdetails = [set() for i in xrange(len(phases.allphases))]
                    _revs = set()
                    for relremphase in phases.trackedphases[::-1]:
                        # we iterate backward because the list always grows
                        # when filled in this direction.
                        _revs.update(self.revs('%ln::%ln',
                                     rroots[relremphase], fut))
                        remdetails[relremphase].update(_revs)

                    for phase in phases.allphases[:-1]:
                        # We don't need the last phase as we will never want to
                        # move anything to it while moving phase backward.

                        # Get the list of all revs on remote which are in a
                        # phase higher than currently processed phase.
                        relremrev = remdetails[phase + 1]

                        if not relremrev:
                            # no candidate to remote push anymore
                            # break before any expensive revset
                            break

                        # dynamically inject the appropriate phase symbol
                        phasename = phases.phasenames[phase]
                        odrevset = 'heads(%%ld and %s())' % phasename
                        outdated = self.set(odrevset, relremrev)
                        for od in outdated:
                            # search, from highest phase down, which remote
                            # phase this head currently sits in
                            candstart = len(remdetails) - 1
                            candstop = phase + 1
                            candidateold = xrange(candstart, candstop, -1)
                            for oldphase in candidateold:
                                if od.rev() in remdetails[oldphase]:
                                    break
                            else: # last one: no need to search
                                oldphase = phase + 1
                            topush.append((oldphase, phase, od))

                    # push every needed phase update to the remote
                    for oldphase, newphase, newremotehead in topush:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(oldphase), str(newphase))
                        if not r:
                            self.ui.warn(_('updating phase of %s '
                                           'to %s from %s failed!\n')
                                            % (newremotehead, newphase,
                                               oldphase))
            finally:
                locallock.release()
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    # only fast-forward the remote bookmark: the local
                    # target must descend from the remote one
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1719
1719
1720 def changegroupinfo(self, nodes, source):
1720 def changegroupinfo(self, nodes, source):
1721 if self.ui.verbose or source == 'bundle':
1721 if self.ui.verbose or source == 'bundle':
1722 self.ui.status(_("%d changesets found\n") % len(nodes))
1722 self.ui.status(_("%d changesets found\n") % len(nodes))
1723 if self.ui.debugflag:
1723 if self.ui.debugflag:
1724 self.ui.debug("list of changesets:\n")
1724 self.ui.debug("list of changesets:\n")
1725 for node in nodes:
1725 for node in nodes:
1726 self.ui.debug("%s\n" % hex(node))
1726 self.ui.debug("%s\n" % hex(node))
1727
1727
1728 def changegroupsubset(self, bases, heads, source):
1728 def changegroupsubset(self, bases, heads, source):
1729 """Compute a changegroup consisting of all the nodes that are
1729 """Compute a changegroup consisting of all the nodes that are
1730 descendants of any of the bases and ancestors of any of the heads.
1730 descendants of any of the bases and ancestors of any of the heads.
1731 Return a chunkbuffer object whose read() method will return
1731 Return a chunkbuffer object whose read() method will return
1732 successive changegroup chunks.
1732 successive changegroup chunks.
1733
1733
1734 It is fairly complex as determining which filenodes and which
1734 It is fairly complex as determining which filenodes and which
1735 manifest nodes need to be included for the changeset to be complete
1735 manifest nodes need to be included for the changeset to be complete
1736 is non-trivial.
1736 is non-trivial.
1737
1737
1738 Another wrinkle is doing the reverse, figuring out which changeset in
1738 Another wrinkle is doing the reverse, figuring out which changeset in
1739 the changegroup a particular filenode or manifestnode belongs to.
1739 the changegroup a particular filenode or manifestnode belongs to.
1740 """
1740 """
1741 cl = self.changelog
1741 cl = self.changelog
1742 if not bases:
1742 if not bases:
1743 bases = [nullid]
1743 bases = [nullid]
1744 csets, bases, heads = cl.nodesbetween(bases, heads)
1744 csets, bases, heads = cl.nodesbetween(bases, heads)
1745 # We assume that all ancestors of bases are known
1745 # We assume that all ancestors of bases are known
1746 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1746 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1747 return self._changegroupsubset(common, csets, heads, source)
1747 return self._changegroupsubset(common, csets, heads, source)
1748
1748
1749 def getlocalbundle(self, source, outgoing):
1749 def getlocalbundle(self, source, outgoing):
1750 """Like getbundle, but taking a discovery.outgoing as an argument.
1750 """Like getbundle, but taking a discovery.outgoing as an argument.
1751
1751
1752 This is only implemented for local repos and reuses potentially
1752 This is only implemented for local repos and reuses potentially
1753 precomputed sets in outgoing."""
1753 precomputed sets in outgoing."""
1754 if not outgoing.missing:
1754 if not outgoing.missing:
1755 return None
1755 return None
1756 return self._changegroupsubset(outgoing.common,
1756 return self._changegroupsubset(outgoing.common,
1757 outgoing.missing,
1757 outgoing.missing,
1758 outgoing.missingheads,
1758 outgoing.missingheads,
1759 source)
1759 source)
1760
1760
1761 def getbundle(self, source, heads=None, common=None):
1761 def getbundle(self, source, heads=None, common=None):
1762 """Like changegroupsubset, but returns the set difference between the
1762 """Like changegroupsubset, but returns the set difference between the
1763 ancestors of heads and the ancestors common.
1763 ancestors of heads and the ancestors common.
1764
1764
1765 If heads is None, use the local heads. If common is None, use [nullid].
1765 If heads is None, use the local heads. If common is None, use [nullid].
1766
1766
1767 The nodes in common might not all be known locally due to the way the
1767 The nodes in common might not all be known locally due to the way the
1768 current discovery protocol works.
1768 current discovery protocol works.
1769 """
1769 """
1770 cl = self.changelog
1770 cl = self.changelog
1771 if common:
1771 if common:
1772 nm = cl.nodemap
1772 nm = cl.nodemap
1773 common = [n for n in common if n in nm]
1773 common = [n for n in common if n in nm]
1774 else:
1774 else:
1775 common = [nullid]
1775 common = [nullid]
1776 if not heads:
1776 if not heads:
1777 heads = cl.heads()
1777 heads = cl.heads()
1778 return self.getlocalbundle(source,
1778 return self.getlocalbundle(source,
1779 discovery.outgoing(cl, common, heads))
1779 discovery.outgoing(cl, common, heads))
1780
1780
    def _changegroupsubset(self, commonrevs, csets, heads, source):
        """Build a changegroup for the changesets *csets*, assuming the
        receiver already has everything whose linkrev is in *commonrevs*.

        Returns a chunkbuffer (changegroup.unbundle10) streaming the
        changelog, manifest and file groups.
        """
        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests: manifest node -> owning changelog node
        fnodes = {} # needed file nodes: fname -> {filenode -> clnode}
        changedfiles = set()
        # per-file state shared with lookup(): [current fname, its fnodes map]
        fstate = ['', {}]
        count = [0] # mutable progress counter shared with lookup()

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        # called back by the bundler for every node it emits; must return
        # the changelog node the emitted node belongs to.  As a side
        # effect it collects the manifests/files needed by later groups
        # and drives progress output.
        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1877
1877
1878 def changegroup(self, basenodes, source):
1878 def changegroup(self, basenodes, source):
1879 # to avoid a race we use changegroupsubset() (issue1320)
1879 # to avoid a race we use changegroupsubset() (issue1320)
1880 return self.changegroupsubset(basenodes, self.heads(), source)
1880 return self.changegroupsubset(basenodes, self.heads(), source)
1881
1881
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests: manifest node -> owning changelog node
        changedfiles = set()
        fstate = [''] # name of the file currently being bundled (progress)
        count = [0] # mutable progress counter shared with lookup()

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        # nodes of a revlog whose linkrev points into the outgoing set
        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        # called back by the bundler for every node it emits; must return
        # the changelog node the emitted node belongs to, collecting the
        # needed manifests/files and driving progress output on the way
        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            # signal that no more groups are left
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1966
1966
1967 def addchangegroup(self, source, srctype, url, emptyok=False):
1967 def addchangegroup(self, source, srctype, url, emptyok=False):
1968 """Add the changegroup returned by source.read() to this repo.
1968 """Add the changegroup returned by source.read() to this repo.
1969 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1969 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1970 the URL of the repo where this changegroup is coming from.
1970 the URL of the repo where this changegroup is coming from.
1971
1971
1972 Return an integer summarizing the change to this repo:
1972 Return an integer summarizing the change to this repo:
1973 - nothing changed or no source: 0
1973 - nothing changed or no source: 0
1974 - more heads than before: 1+added heads (2..n)
1974 - more heads than before: 1+added heads (2..n)
1975 - fewer heads than before: -1-removed heads (-2..-n)
1975 - fewer heads than before: -1-removed heads (-2..-n)
1976 - number of heads stays the same: 1
1976 - number of heads stays the same: 1
1977 """
1977 """
1978 def csmap(x):
1978 def csmap(x):
1979 self.ui.debug("add changeset %s\n" % short(x))
1979 self.ui.debug("add changeset %s\n" % short(x))
1980 return len(cl)
1980 return len(cl)
1981
1981
1982 def revmap(x):
1982 def revmap(x):
1983 return cl.rev(x)
1983 return cl.rev(x)
1984
1984
1985 if not source:
1985 if not source:
1986 return 0
1986 return 0
1987
1987
1988 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1988 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1989
1989
1990 changesets = files = revisions = 0
1990 changesets = files = revisions = 0
1991 efiles = set()
1991 efiles = set()
1992
1992
1993 # write changelog data to temp files so concurrent readers will not see
1993 # write changelog data to temp files so concurrent readers will not see
1994 # inconsistent view
1994 # inconsistent view
1995 cl = self.changelog
1995 cl = self.changelog
1996 cl.delayupdate()
1996 cl.delayupdate()
1997 oldheads = cl.heads()
1997 oldheads = cl.heads()
1998
1998
1999 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1999 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2000 try:
2000 try:
2001 trp = weakref.proxy(tr)
2001 trp = weakref.proxy(tr)
2002 # pull off the changeset group
2002 # pull off the changeset group
2003 self.ui.status(_("adding changesets\n"))
2003 self.ui.status(_("adding changesets\n"))
2004 clstart = len(cl)
2004 clstart = len(cl)
2005 class prog(object):
2005 class prog(object):
2006 step = _('changesets')
2006 step = _('changesets')
2007 count = 1
2007 count = 1
2008 ui = self.ui
2008 ui = self.ui
2009 total = None
2009 total = None
2010 def __call__(self):
2010 def __call__(self):
2011 self.ui.progress(self.step, self.count, unit=_('chunks'),
2011 self.ui.progress(self.step, self.count, unit=_('chunks'),
2012 total=self.total)
2012 total=self.total)
2013 self.count += 1
2013 self.count += 1
2014 pr = prog()
2014 pr = prog()
2015 source.callback = pr
2015 source.callback = pr
2016
2016
2017 source.changelogheader()
2017 source.changelogheader()
2018 if (cl.addgroup(source, csmap, trp) is None
2018 srccontent = cl.addgroup(source, csmap, trp)
2019 and not emptyok):
2019 if not (srccontent or emptyok):
2020 raise util.Abort(_("received changelog group is empty"))
2020 raise util.Abort(_("received changelog group is empty"))
2021 clend = len(cl)
2021 clend = len(cl)
2022 changesets = clend - clstart
2022 changesets = clend - clstart
2023 for c in xrange(clstart, clend):
2023 for c in xrange(clstart, clend):
2024 efiles.update(self[c].files())
2024 efiles.update(self[c].files())
2025 efiles = len(efiles)
2025 efiles = len(efiles)
2026 self.ui.progress(_('changesets'), None)
2026 self.ui.progress(_('changesets'), None)
2027
2027
2028 # pull off the manifest group
2028 # pull off the manifest group
2029 self.ui.status(_("adding manifests\n"))
2029 self.ui.status(_("adding manifests\n"))
2030 pr.step = _('manifests')
2030 pr.step = _('manifests')
2031 pr.count = 1
2031 pr.count = 1
2032 pr.total = changesets # manifests <= changesets
2032 pr.total = changesets # manifests <= changesets
2033 # no need to check for empty manifest group here:
2033 # no need to check for empty manifest group here:
2034 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2034 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2035 # no new manifest will be created and the manifest group will
2035 # no new manifest will be created and the manifest group will
2036 # be empty during the pull
2036 # be empty during the pull
2037 source.manifestheader()
2037 source.manifestheader()
2038 self.manifest.addgroup(source, revmap, trp)
2038 self.manifest.addgroup(source, revmap, trp)
2039 self.ui.progress(_('manifests'), None)
2039 self.ui.progress(_('manifests'), None)
2040
2040
2041 needfiles = {}
2041 needfiles = {}
2042 if self.ui.configbool('server', 'validate', default=False):
2042 if self.ui.configbool('server', 'validate', default=False):
2043 # validate incoming csets have their manifests
2043 # validate incoming csets have their manifests
2044 for cset in xrange(clstart, clend):
2044 for cset in xrange(clstart, clend):
2045 mfest = self.changelog.read(self.changelog.node(cset))[0]
2045 mfest = self.changelog.read(self.changelog.node(cset))[0]
2046 mfest = self.manifest.readdelta(mfest)
2046 mfest = self.manifest.readdelta(mfest)
2047 # store file nodes we must see
2047 # store file nodes we must see
2048 for f, n in mfest.iteritems():
2048 for f, n in mfest.iteritems():
2049 needfiles.setdefault(f, set()).add(n)
2049 needfiles.setdefault(f, set()).add(n)
2050
2050
2051 # process the files
2051 # process the files
2052 self.ui.status(_("adding file changes\n"))
2052 self.ui.status(_("adding file changes\n"))
2053 pr.step = _('files')
2053 pr.step = _('files')
2054 pr.count = 1
2054 pr.count = 1
2055 pr.total = efiles
2055 pr.total = efiles
2056 source.callback = None
2056 source.callback = None
2057
2057
2058 while True:
2058 while True:
2059 chunkdata = source.filelogheader()
2059 chunkdata = source.filelogheader()
2060 if not chunkdata:
2060 if not chunkdata:
2061 break
2061 break
2062 f = chunkdata["filename"]
2062 f = chunkdata["filename"]
2063 self.ui.debug("adding %s revisions\n" % f)
2063 self.ui.debug("adding %s revisions\n" % f)
2064 pr()
2064 pr()
2065 fl = self.file(f)
2065 fl = self.file(f)
2066 o = len(fl)
2066 o = len(fl)
2067 if fl.addgroup(source, revmap, trp) is None:
2067 if not fl.addgroup(source, revmap, trp):
2068 raise util.Abort(_("received file revlog group is empty"))
2068 raise util.Abort(_("received file revlog group is empty"))
2069 revisions += len(fl) - o
2069 revisions += len(fl) - o
2070 files += 1
2070 files += 1
2071 if f in needfiles:
2071 if f in needfiles:
2072 needs = needfiles[f]
2072 needs = needfiles[f]
2073 for new in xrange(o, len(fl)):
2073 for new in xrange(o, len(fl)):
2074 n = fl.node(new)
2074 n = fl.node(new)
2075 if n in needs:
2075 if n in needs:
2076 needs.remove(n)
2076 needs.remove(n)
2077 if not needs:
2077 if not needs:
2078 del needfiles[f]
2078 del needfiles[f]
2079 self.ui.progress(_('files'), None)
2079 self.ui.progress(_('files'), None)
2080
2080
2081 for f, needs in needfiles.iteritems():
2081 for f, needs in needfiles.iteritems():
2082 fl = self.file(f)
2082 fl = self.file(f)
2083 for n in needs:
2083 for n in needs:
2084 try:
2084 try:
2085 fl.rev(n)
2085 fl.rev(n)
2086 except error.LookupError:
2086 except error.LookupError:
2087 raise util.Abort(
2087 raise util.Abort(
2088 _('missing file data for %s:%s - run hg verify') %
2088 _('missing file data for %s:%s - run hg verify') %
2089 (f, hex(n)))
2089 (f, hex(n)))
2090
2090
2091 dh = 0
2091 dh = 0
2092 if oldheads:
2092 if oldheads:
2093 heads = cl.heads()
2093 heads = cl.heads()
2094 dh = len(heads) - len(oldheads)
2094 dh = len(heads) - len(oldheads)
2095 for h in heads:
2095 for h in heads:
2096 if h not in oldheads and 'close' in self[h].extra():
2096 if h not in oldheads and 'close' in self[h].extra():
2097 dh -= 1
2097 dh -= 1
2098 htext = ""
2098 htext = ""
2099 if dh:
2099 if dh:
2100 htext = _(" (%+d heads)") % dh
2100 htext = _(" (%+d heads)") % dh
2101
2101
2102 self.ui.status(_("added %d changesets"
2102 self.ui.status(_("added %d changesets"
2103 " with %d changes to %d files%s\n")
2103 " with %d changes to %d files%s\n")
2104 % (changesets, revisions, files, htext))
2104 % (changesets, revisions, files, htext))
2105
2105
2106 if changesets > 0:
2106 if changesets > 0:
2107 p = lambda: cl.writepending() and self.root or ""
2107 p = lambda: cl.writepending() and self.root or ""
2108 self.hook('pretxnchangegroup', throw=True,
2108 self.hook('pretxnchangegroup', throw=True,
2109 node=hex(cl.node(clstart)), source=srctype,
2109 node=hex(cl.node(clstart)), source=srctype,
2110 url=url, pending=p)
2110 url=url, pending=p)
2111
2111
2112 added = [cl.node(r) for r in xrange(clstart, clend)]
2112 added = [cl.node(r) for r in xrange(clstart, clend)]
2113 publishing = self.ui.configbool('phases', 'publish', True)
2113 publishing = self.ui.configbool('phases', 'publish', True)
2114 if publishing and srctype == 'push':
2114 if publishing and srctype == 'push':
2115 # Old server can not push the boundary themself.
2115 # Old server can not push the boundary themself.
2116 # This clause ensure pushed changeset are alway marked as public
2116 # This clause ensure pushed changeset are alway marked as public
2117 phases.advanceboundary(self, phases.public, added)
2117 phases.advanceboundary(self, phases.public, added)
2118 elif srctype != 'strip': # strip should not touch boundary at all
2118 elif srctype != 'strip': # strip should not touch boundary at all
2119 phases.retractboundary(self, phases.draft, added)
2119 phases.retractboundary(self, phases.draft, added)
2120
2120
2121 # make changelog see real files again
2121 # make changelog see real files again
2122 cl.finalize(trp)
2122 cl.finalize(trp)
2123
2123
2124 tr.close()
2124 tr.close()
2125
2125
2126 if changesets > 0:
2126 if changesets > 0:
2127 def runhooks():
2127 def runhooks():
2128 # forcefully update the on-disk branch cache
2128 # forcefully update the on-disk branch cache
2129 self.ui.debug("updating the branch cache\n")
2129 self.ui.debug("updating the branch cache\n")
2130 self.updatebranchcache()
2130 self.updatebranchcache()
2131 self.hook("changegroup", node=hex(cl.node(clstart)),
2131 self.hook("changegroup", node=hex(cl.node(clstart)),
2132 source=srctype, url=url)
2132 source=srctype, url=url)
2133
2133
2134 for n in added:
2134 for n in added:
2135 self.hook("incoming", node=hex(n), source=srctype,
2135 self.hook("incoming", node=hex(n), source=srctype,
2136 url=url)
2136 url=url)
2137 self._afterlock(runhooks)
2137 self._afterlock(runhooks)
2138
2138
2139 finally:
2139 finally:
2140 tr.release()
2140 tr.release()
2141 # never return 0 here:
2141 # never return 0 here:
2142 if dh < 0:
2142 if dh < 0:
2143 return dh - 1
2143 return dh - 1
2144 else:
2144 else:
2145 return dh + 1
2145 return dh + 1
2146
2146
2147 def stream_in(self, remote, requirements):
2147 def stream_in(self, remote, requirements):
2148 lock = self.lock()
2148 lock = self.lock()
2149 try:
2149 try:
2150 fp = remote.stream_out()
2150 fp = remote.stream_out()
2151 l = fp.readline()
2151 l = fp.readline()
2152 try:
2152 try:
2153 resp = int(l)
2153 resp = int(l)
2154 except ValueError:
2154 except ValueError:
2155 raise error.ResponseError(
2155 raise error.ResponseError(
2156 _('Unexpected response from remote server:'), l)
2156 _('Unexpected response from remote server:'), l)
2157 if resp == 1:
2157 if resp == 1:
2158 raise util.Abort(_('operation forbidden by server'))
2158 raise util.Abort(_('operation forbidden by server'))
2159 elif resp == 2:
2159 elif resp == 2:
2160 raise util.Abort(_('locking the remote repository failed'))
2160 raise util.Abort(_('locking the remote repository failed'))
2161 elif resp != 0:
2161 elif resp != 0:
2162 raise util.Abort(_('the server sent an unknown error code'))
2162 raise util.Abort(_('the server sent an unknown error code'))
2163 self.ui.status(_('streaming all changes\n'))
2163 self.ui.status(_('streaming all changes\n'))
2164 l = fp.readline()
2164 l = fp.readline()
2165 try:
2165 try:
2166 total_files, total_bytes = map(int, l.split(' ', 1))
2166 total_files, total_bytes = map(int, l.split(' ', 1))
2167 except (ValueError, TypeError):
2167 except (ValueError, TypeError):
2168 raise error.ResponseError(
2168 raise error.ResponseError(
2169 _('Unexpected response from remote server:'), l)
2169 _('Unexpected response from remote server:'), l)
2170 self.ui.status(_('%d files to transfer, %s of data\n') %
2170 self.ui.status(_('%d files to transfer, %s of data\n') %
2171 (total_files, util.bytecount(total_bytes)))
2171 (total_files, util.bytecount(total_bytes)))
2172 start = time.time()
2172 start = time.time()
2173 for i in xrange(total_files):
2173 for i in xrange(total_files):
2174 # XXX doesn't support '\n' or '\r' in filenames
2174 # XXX doesn't support '\n' or '\r' in filenames
2175 l = fp.readline()
2175 l = fp.readline()
2176 try:
2176 try:
2177 name, size = l.split('\0', 1)
2177 name, size = l.split('\0', 1)
2178 size = int(size)
2178 size = int(size)
2179 except (ValueError, TypeError):
2179 except (ValueError, TypeError):
2180 raise error.ResponseError(
2180 raise error.ResponseError(
2181 _('Unexpected response from remote server:'), l)
2181 _('Unexpected response from remote server:'), l)
2182 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2182 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2183 # for backwards compat, name was partially encoded
2183 # for backwards compat, name was partially encoded
2184 ofp = self.sopener(store.decodedir(name), 'w')
2184 ofp = self.sopener(store.decodedir(name), 'w')
2185 for chunk in util.filechunkiter(fp, limit=size):
2185 for chunk in util.filechunkiter(fp, limit=size):
2186 ofp.write(chunk)
2186 ofp.write(chunk)
2187 ofp.close()
2187 ofp.close()
2188 elapsed = time.time() - start
2188 elapsed = time.time() - start
2189 if elapsed <= 0:
2189 if elapsed <= 0:
2190 elapsed = 0.001
2190 elapsed = 0.001
2191 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2191 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2192 (util.bytecount(total_bytes), elapsed,
2192 (util.bytecount(total_bytes), elapsed,
2193 util.bytecount(total_bytes / elapsed)))
2193 util.bytecount(total_bytes / elapsed)))
2194
2194
2195 # new requirements = old non-format requirements + new format-related
2195 # new requirements = old non-format requirements + new format-related
2196 # requirements from the streamed-in repository
2196 # requirements from the streamed-in repository
2197 requirements.update(set(self.requirements) - self.supportedformats)
2197 requirements.update(set(self.requirements) - self.supportedformats)
2198 self._applyrequirements(requirements)
2198 self._applyrequirements(requirements)
2199 self._writerequirements()
2199 self._writerequirements()
2200
2200
2201 self.invalidate()
2201 self.invalidate()
2202 return len(self.heads()) + 1
2202 return len(self.heads()) + 1
2203 finally:
2203 finally:
2204 lock.release()
2204 lock.release()
2205
2205
2206 def clone(self, remote, heads=[], stream=False):
2206 def clone(self, remote, heads=[], stream=False):
2207 '''clone remote repository.
2207 '''clone remote repository.
2208
2208
2209 keyword arguments:
2209 keyword arguments:
2210 heads: list of revs to clone (forces use of pull)
2210 heads: list of revs to clone (forces use of pull)
2211 stream: use streaming clone if possible'''
2211 stream: use streaming clone if possible'''
2212
2212
2213 # now, all clients that can request uncompressed clones can
2213 # now, all clients that can request uncompressed clones can
2214 # read repo formats supported by all servers that can serve
2214 # read repo formats supported by all servers that can serve
2215 # them.
2215 # them.
2216
2216
2217 # if revlog format changes, client will have to check version
2217 # if revlog format changes, client will have to check version
2218 # and format flags on "stream" capability, and use
2218 # and format flags on "stream" capability, and use
2219 # uncompressed only if compatible.
2219 # uncompressed only if compatible.
2220
2220
2221 if stream and not heads:
2221 if stream and not heads:
2222 # 'stream' means remote revlog format is revlogv1 only
2222 # 'stream' means remote revlog format is revlogv1 only
2223 if remote.capable('stream'):
2223 if remote.capable('stream'):
2224 return self.stream_in(remote, set(('revlogv1',)))
2224 return self.stream_in(remote, set(('revlogv1',)))
2225 # otherwise, 'streamreqs' contains the remote revlog format
2225 # otherwise, 'streamreqs' contains the remote revlog format
2226 streamreqs = remote.capable('streamreqs')
2226 streamreqs = remote.capable('streamreqs')
2227 if streamreqs:
2227 if streamreqs:
2228 streamreqs = set(streamreqs.split(','))
2228 streamreqs = set(streamreqs.split(','))
2229 # if we support it, stream in and adjust our requirements
2229 # if we support it, stream in and adjust our requirements
2230 if not streamreqs - self.supportedformats:
2230 if not streamreqs - self.supportedformats:
2231 return self.stream_in(remote, streamreqs)
2231 return self.stream_in(remote, streamreqs)
2232 return self.pull(remote, heads)
2232 return self.pull(remote, heads)
2233
2233
2234 def pushkey(self, namespace, key, old, new):
2234 def pushkey(self, namespace, key, old, new):
2235 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2235 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2236 old=old, new=new)
2236 old=old, new=new)
2237 ret = pushkey.push(self, namespace, key, old, new)
2237 ret = pushkey.push(self, namespace, key, old, new)
2238 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2238 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2239 ret=ret)
2239 ret=ret)
2240 return ret
2240 return ret
2241
2241
2242 def listkeys(self, namespace):
2242 def listkeys(self, namespace):
2243 self.hook('prelistkeys', throw=True, namespace=namespace)
2243 self.hook('prelistkeys', throw=True, namespace=namespace)
2244 values = pushkey.list(self, namespace)
2244 values = pushkey.list(self, namespace)
2245 self.hook('listkeys', namespace=namespace, values=values)
2245 self.hook('listkeys', namespace=namespace, values=values)
2246 return values
2246 return values
2247
2247
2248 def debugwireargs(self, one, two, three=None, four=None, five=None):
2248 def debugwireargs(self, one, two, three=None, four=None, five=None):
2249 '''used to test argument passing over the wire'''
2249 '''used to test argument passing over the wire'''
2250 return "%s %s %s %s %s" % (one, two, three, four, five)
2250 return "%s %s %s %s %s" % (one, two, three, four, five)
2251
2251
2252 def savecommitmessage(self, text):
2252 def savecommitmessage(self, text):
2253 fp = self.opener('last-message.txt', 'wb')
2253 fp = self.opener('last-message.txt', 'wb')
2254 try:
2254 try:
2255 fp.write(text)
2255 fp.write(text)
2256 finally:
2256 finally:
2257 fp.close()
2257 fp.close()
2258 return self.pathto(fp.name[len(self.root)+1:])
2258 return self.pathto(fp.name[len(self.root)+1:])
2259
2259
2260 # used to avoid circular references so destructors work
2260 # used to avoid circular references so destructors work
2261 def aftertrans(files):
2261 def aftertrans(files):
2262 renamefiles = [tuple(t) for t in files]
2262 renamefiles = [tuple(t) for t in files]
2263 def a():
2263 def a():
2264 for src, dest in renamefiles:
2264 for src, dest in renamefiles:
2265 util.rename(src, dest)
2265 util.rename(src, dest)
2266 return a
2266 return a
2267
2267
2268 def undoname(fn):
2268 def undoname(fn):
2269 base, name = os.path.split(fn)
2269 base, name = os.path.split(fn)
2270 assert name.startswith('journal')
2270 assert name.startswith('journal')
2271 return os.path.join(base, name.replace('journal', 'undo', 1))
2271 return os.path.join(base, name.replace('journal', 'undo', 1))
2272
2272
2273 def instance(ui, path, create):
2273 def instance(ui, path, create):
2274 return localrepository(ui, util.urllocalpath(path), create)
2274 return localrepository(ui, util.urllocalpath(path), create)
2275
2275
2276 def islocal(path):
2276 def islocal(path):
2277 return True
2277 return True
@@ -1,1277 +1,1280
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 # import stuff from node for others to import from revlog
14 # import stuff from node for others to import from revlog
15 from node import bin, hex, nullid, nullrev
15 from node import bin, hex, nullid, nullrev
16 from i18n import _
16 from i18n import _
17 import ancestor, mdiff, parsers, error, util, dagutil
17 import ancestor, mdiff, parsers, error, util, dagutil
18 import struct, zlib, errno
18 import struct, zlib, errno
19
19
20 _pack = struct.pack
20 _pack = struct.pack
21 _unpack = struct.unpack
21 _unpack = struct.unpack
22 _compress = zlib.compress
22 _compress = zlib.compress
23 _decompress = zlib.decompress
23 _decompress = zlib.decompress
24 _sha = util.sha1
24 _sha = util.sha1
25
25
26 # revlog header flags
26 # revlog header flags
27 REVLOGV0 = 0
27 REVLOGV0 = 0
28 REVLOGNG = 1
28 REVLOGNG = 1
29 REVLOGNGINLINEDATA = (1 << 16)
29 REVLOGNGINLINEDATA = (1 << 16)
30 REVLOGGENERALDELTA = (1 << 17)
30 REVLOGGENERALDELTA = (1 << 17)
31 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
31 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
32 REVLOG_DEFAULT_FORMAT = REVLOGNG
32 REVLOG_DEFAULT_FORMAT = REVLOGNG
33 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
33 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
34 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
34 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
35
35
36 # revlog index flags
36 # revlog index flags
37 REVIDX_KNOWN_FLAGS = 0
37 REVIDX_KNOWN_FLAGS = 0
38
38
39 # max size of revlog with inline data
39 # max size of revlog with inline data
40 _maxinline = 131072
40 _maxinline = 131072
41 _chunksize = 1048576
41 _chunksize = 1048576
42
42
43 RevlogError = error.RevlogError
43 RevlogError = error.RevlogError
44 LookupError = error.LookupError
44 LookupError = error.LookupError
45
45
46 def getoffset(q):
46 def getoffset(q):
47 return int(q >> 16)
47 return int(q >> 16)
48
48
49 def gettype(q):
49 def gettype(q):
50 return int(q & 0xFFFF)
50 return int(q & 0xFFFF)
51
51
52 def offset_type(offset, type):
52 def offset_type(offset, type):
53 return long(long(offset) << 16 | type)
53 return long(long(offset) << 16 | type)
54
54
55 nullhash = _sha(nullid)
55 nullhash = _sha(nullid)
56
56
57 def hash(text, p1, p2):
57 def hash(text, p1, p2):
58 """generate a hash from the given text and its parent hashes
58 """generate a hash from the given text and its parent hashes
59
59
60 This hash combines both the current file contents and its history
60 This hash combines both the current file contents and its history
61 in a manner that makes it easy to distinguish nodes with the same
61 in a manner that makes it easy to distinguish nodes with the same
62 content in the revision graph.
62 content in the revision graph.
63 """
63 """
64 # As of now, if one of the parent node is null, p2 is null
64 # As of now, if one of the parent node is null, p2 is null
65 if p2 == nullid:
65 if p2 == nullid:
66 # deep copy of a hash is faster than creating one
66 # deep copy of a hash is faster than creating one
67 s = nullhash.copy()
67 s = nullhash.copy()
68 s.update(p1)
68 s.update(p1)
69 else:
69 else:
70 # none of the parent nodes are nullid
70 # none of the parent nodes are nullid
71 l = [p1, p2]
71 l = [p1, p2]
72 l.sort()
72 l.sort()
73 s = _sha(l[0])
73 s = _sha(l[0])
74 s.update(l[1])
74 s.update(l[1])
75 s.update(text)
75 s.update(text)
76 return s.digest()
76 return s.digest()
77
77
78 def compress(text):
78 def compress(text):
79 """ generate a possibly-compressed representation of text """
79 """ generate a possibly-compressed representation of text """
80 if not text:
80 if not text:
81 return ("", text)
81 return ("", text)
82 l = len(text)
82 l = len(text)
83 bin = None
83 bin = None
84 if l < 44:
84 if l < 44:
85 pass
85 pass
86 elif l > 1000000:
86 elif l > 1000000:
87 # zlib makes an internal copy, thus doubling memory usage for
87 # zlib makes an internal copy, thus doubling memory usage for
88 # large files, so lets do this in pieces
88 # large files, so lets do this in pieces
89 z = zlib.compressobj()
89 z = zlib.compressobj()
90 p = []
90 p = []
91 pos = 0
91 pos = 0
92 while pos < l:
92 while pos < l:
93 pos2 = pos + 2**20
93 pos2 = pos + 2**20
94 p.append(z.compress(text[pos:pos2]))
94 p.append(z.compress(text[pos:pos2]))
95 pos = pos2
95 pos = pos2
96 p.append(z.flush())
96 p.append(z.flush())
97 if sum(map(len, p)) < l:
97 if sum(map(len, p)) < l:
98 bin = "".join(p)
98 bin = "".join(p)
99 else:
99 else:
100 bin = _compress(text)
100 bin = _compress(text)
101 if bin is None or len(bin) > l:
101 if bin is None or len(bin) > l:
102 if text[0] == '\0':
102 if text[0] == '\0':
103 return ("", text)
103 return ("", text)
104 return ('u', text)
104 return ('u', text)
105 return ("", bin)
105 return ("", bin)
106
106
107 def decompress(bin):
107 def decompress(bin):
108 """ decompress the given input """
108 """ decompress the given input """
109 if not bin:
109 if not bin:
110 return bin
110 return bin
111 t = bin[0]
111 t = bin[0]
112 if t == '\0':
112 if t == '\0':
113 return bin
113 return bin
114 if t == 'x':
114 if t == 'x':
115 return _decompress(bin)
115 return _decompress(bin)
116 if t == 'u':
116 if t == 'u':
117 return bin[1:]
117 return bin[1:]
118 raise RevlogError(_("unknown compression type %r") % t)
118 raise RevlogError(_("unknown compression type %r") % t)
119
119
120 indexformatv0 = ">4l20s20s20s"
120 indexformatv0 = ">4l20s20s20s"
121 v0shaoffset = 56
121 v0shaoffset = 56
122
122
123 class revlogoldio(object):
123 class revlogoldio(object):
124 def __init__(self):
124 def __init__(self):
125 self.size = struct.calcsize(indexformatv0)
125 self.size = struct.calcsize(indexformatv0)
126
126
127 def parseindex(self, data, inline):
127 def parseindex(self, data, inline):
128 s = self.size
128 s = self.size
129 index = []
129 index = []
130 nodemap = {nullid: nullrev}
130 nodemap = {nullid: nullrev}
131 n = off = 0
131 n = off = 0
132 l = len(data)
132 l = len(data)
133 while off + s <= l:
133 while off + s <= l:
134 cur = data[off:off + s]
134 cur = data[off:off + s]
135 off += s
135 off += s
136 e = _unpack(indexformatv0, cur)
136 e = _unpack(indexformatv0, cur)
137 # transform to revlogv1 format
137 # transform to revlogv1 format
138 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
138 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
139 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
139 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
140 index.append(e2)
140 index.append(e2)
141 nodemap[e[6]] = n
141 nodemap[e[6]] = n
142 n += 1
142 n += 1
143
143
144 # add the magic null revision at -1
144 # add the magic null revision at -1
145 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
145 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
146
146
147 return index, nodemap, None
147 return index, nodemap, None
148
148
149 def packentry(self, entry, node, version, rev):
149 def packentry(self, entry, node, version, rev):
150 if gettype(entry[0]):
150 if gettype(entry[0]):
151 raise RevlogError(_("index entry flags need RevlogNG"))
151 raise RevlogError(_("index entry flags need RevlogNG"))
152 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
152 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
153 node(entry[5]), node(entry[6]), entry[7])
153 node(entry[5]), node(entry[6]), entry[7])
154 return _pack(indexformatv0, *e2)
154 return _pack(indexformatv0, *e2)
155
155
156 # index ng:
156 # index ng:
157 # 6 bytes: offset
157 # 6 bytes: offset
158 # 2 bytes: flags
158 # 2 bytes: flags
159 # 4 bytes: compressed length
159 # 4 bytes: compressed length
160 # 4 bytes: uncompressed length
160 # 4 bytes: uncompressed length
161 # 4 bytes: base rev
161 # 4 bytes: base rev
162 # 4 bytes: link rev
162 # 4 bytes: link rev
163 # 4 bytes: parent 1 rev
163 # 4 bytes: parent 1 rev
164 # 4 bytes: parent 2 rev
164 # 4 bytes: parent 2 rev
165 # 32 bytes: nodeid
165 # 32 bytes: nodeid
166 indexformatng = ">Qiiiiii20s12x"
166 indexformatng = ">Qiiiiii20s12x"
167 ngshaoffset = 32
167 ngshaoffset = 32
168 versionformat = ">I"
168 versionformat = ">I"
169
169
170 class revlogio(object):
170 class revlogio(object):
171 def __init__(self):
171 def __init__(self):
172 self.size = struct.calcsize(indexformatng)
172 self.size = struct.calcsize(indexformatng)
173
173
174 def parseindex(self, data, inline):
174 def parseindex(self, data, inline):
175 # call the C implementation to parse the index data
175 # call the C implementation to parse the index data
176 index, cache = parsers.parse_index2(data, inline)
176 index, cache = parsers.parse_index2(data, inline)
177 return index, None, cache
177 return index, None, cache
178
178
179 def packentry(self, entry, node, version, rev):
179 def packentry(self, entry, node, version, rev):
180 p = _pack(indexformatng, *entry)
180 p = _pack(indexformatng, *entry)
181 if rev == 0:
181 if rev == 0:
182 p = _pack(versionformat, version) + p[4:]
182 p = _pack(versionformat, version) + p[4:]
183 return p
183 return p
184
184
185 class revlog(object):
185 class revlog(object):
186 """
186 """
187 the underlying revision storage object
187 the underlying revision storage object
188
188
189 A revlog consists of two parts, an index and the revision data.
189 A revlog consists of two parts, an index and the revision data.
190
190
191 The index is a file with a fixed record size containing
191 The index is a file with a fixed record size containing
192 information on each revision, including its nodeid (hash), the
192 information on each revision, including its nodeid (hash), the
193 nodeids of its parents, the position and offset of its data within
193 nodeids of its parents, the position and offset of its data within
194 the data file, and the revision it's based on. Finally, each entry
194 the data file, and the revision it's based on. Finally, each entry
195 contains a linkrev entry that can serve as a pointer to external
195 contains a linkrev entry that can serve as a pointer to external
196 data.
196 data.
197
197
198 The revision data itself is a linear collection of data chunks.
198 The revision data itself is a linear collection of data chunks.
199 Each chunk represents a revision and is usually represented as a
199 Each chunk represents a revision and is usually represented as a
200 delta against the previous chunk. To bound lookup time, runs of
200 delta against the previous chunk. To bound lookup time, runs of
201 deltas are limited to about 2 times the length of the original
201 deltas are limited to about 2 times the length of the original
202 version data. This makes retrieval of a version proportional to
202 version data. This makes retrieval of a version proportional to
203 its size, or O(1) relative to the number of revisions.
203 its size, or O(1) relative to the number of revisions.
204
204
205 Both pieces of the revlog are written to in an append-only
205 Both pieces of the revlog are written to in an append-only
206 fashion, which means we never need to rewrite a file to insert or
206 fashion, which means we never need to rewrite a file to insert or
207 remove data, and can use some simple techniques to avoid the need
207 remove data, and can use some simple techniques to avoid the need
208 for locking while reading.
208 for locking while reading.
209 """
209 """
210 def __init__(self, opener, indexfile):
210 def __init__(self, opener, indexfile):
211 """
211 """
212 create a revlog object
212 create a revlog object
213
213
214 opener is a function that abstracts the file opening operation
214 opener is a function that abstracts the file opening operation
215 and can be used to implement COW semantics or the like.
215 and can be used to implement COW semantics or the like.
216 """
216 """
217 self.indexfile = indexfile
217 self.indexfile = indexfile
218 self.datafile = indexfile[:-2] + ".d"
218 self.datafile = indexfile[:-2] + ".d"
219 self.opener = opener
219 self.opener = opener
220 self._cache = None
220 self._cache = None
221 self._basecache = (0, 0)
221 self._basecache = (0, 0)
222 self._chunkcache = (0, '')
222 self._chunkcache = (0, '')
223 self.index = []
223 self.index = []
224 self._pcache = {}
224 self._pcache = {}
225 self._nodecache = {nullid: nullrev}
225 self._nodecache = {nullid: nullrev}
226 self._nodepos = None
226 self._nodepos = None
227
227
228 v = REVLOG_DEFAULT_VERSION
228 v = REVLOG_DEFAULT_VERSION
229 opts = getattr(opener, 'options', None)
229 opts = getattr(opener, 'options', None)
230 if opts is not None:
230 if opts is not None:
231 if 'revlogv1' in opts:
231 if 'revlogv1' in opts:
232 if 'generaldelta' in opts:
232 if 'generaldelta' in opts:
233 v |= REVLOGGENERALDELTA
233 v |= REVLOGGENERALDELTA
234 else:
234 else:
235 v = 0
235 v = 0
236
236
237 i = ''
237 i = ''
238 self._initempty = True
238 self._initempty = True
239 try:
239 try:
240 f = self.opener(self.indexfile)
240 f = self.opener(self.indexfile)
241 i = f.read()
241 i = f.read()
242 f.close()
242 f.close()
243 if len(i) > 0:
243 if len(i) > 0:
244 v = struct.unpack(versionformat, i[:4])[0]
244 v = struct.unpack(versionformat, i[:4])[0]
245 self._initempty = False
245 self._initempty = False
246 except IOError, inst:
246 except IOError, inst:
247 if inst.errno != errno.ENOENT:
247 if inst.errno != errno.ENOENT:
248 raise
248 raise
249
249
250 self.version = v
250 self.version = v
251 self._inline = v & REVLOGNGINLINEDATA
251 self._inline = v & REVLOGNGINLINEDATA
252 self._generaldelta = v & REVLOGGENERALDELTA
252 self._generaldelta = v & REVLOGGENERALDELTA
253 flags = v & ~0xFFFF
253 flags = v & ~0xFFFF
254 fmt = v & 0xFFFF
254 fmt = v & 0xFFFF
255 if fmt == REVLOGV0 and flags:
255 if fmt == REVLOGV0 and flags:
256 raise RevlogError(_("index %s unknown flags %#04x for format v0")
256 raise RevlogError(_("index %s unknown flags %#04x for format v0")
257 % (self.indexfile, flags >> 16))
257 % (self.indexfile, flags >> 16))
258 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
258 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
259 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
259 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
260 % (self.indexfile, flags >> 16))
260 % (self.indexfile, flags >> 16))
261 elif fmt > REVLOGNG:
261 elif fmt > REVLOGNG:
262 raise RevlogError(_("index %s unknown format %d")
262 raise RevlogError(_("index %s unknown format %d")
263 % (self.indexfile, fmt))
263 % (self.indexfile, fmt))
264
264
265 self._io = revlogio()
265 self._io = revlogio()
266 if self.version == REVLOGV0:
266 if self.version == REVLOGV0:
267 self._io = revlogoldio()
267 self._io = revlogoldio()
268 try:
268 try:
269 d = self._io.parseindex(i, self._inline)
269 d = self._io.parseindex(i, self._inline)
270 except (ValueError, IndexError):
270 except (ValueError, IndexError):
271 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
271 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
272 self.index, nodemap, self._chunkcache = d
272 self.index, nodemap, self._chunkcache = d
273 if nodemap is not None:
273 if nodemap is not None:
274 self.nodemap = self._nodecache = nodemap
274 self.nodemap = self._nodecache = nodemap
275 if not self._chunkcache:
275 if not self._chunkcache:
276 self._chunkclear()
276 self._chunkclear()
277
277
278 def tip(self):
278 def tip(self):
279 return self.node(len(self.index) - 2)
279 return self.node(len(self.index) - 2)
280 def __len__(self):
280 def __len__(self):
281 return len(self.index) - 1
281 return len(self.index) - 1
282 def __iter__(self):
282 def __iter__(self):
283 for i in xrange(len(self)):
283 for i in xrange(len(self)):
284 yield i
284 yield i
285
285
    @util.propertycache
    def nodemap(self):
        """Lazily-built mapping of node -> revision number.

        Calling rev() forces the index scan that populates _nodecache
        (rev() memoizes every node it walks past), after which the
        cache itself serves as the map.
        """
        self.rev(self.node(0))
        return self._nodecache
290
290
    def rev(self, node):
        """Return the revision number of *node*.

        Fast path is a dict lookup in _nodecache.  On a miss, scan the
        index backwards from the last scan position (_nodepos, or the
        tip), memoizing every node seen, until *node* is found.

        Raises LookupError if the node is not in this revlog.
        """
        try:
            return self._nodecache[node]
        except KeyError:
            # cache miss: walk the index towards rev 0, filling the
            # cache as we go so later lookups are cheap
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                # len(i) - 2 is the tip (the index has one extra entry)
                p = len(i) - 2
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    # resume future scans just below this point
                    self._nodepos = r - 1
                    return r
            raise LookupError(node, self.indexfile, _('no node'))
307
307
308 def node(self, rev):
308 def node(self, rev):
309 return self.index[rev][7]
309 return self.index[rev][7]
310 def linkrev(self, rev):
310 def linkrev(self, rev):
311 return self.index[rev][4]
311 return self.index[rev][4]
312 def parents(self, node):
312 def parents(self, node):
313 i = self.index
313 i = self.index
314 d = i[self.rev(node)]
314 d = i[self.rev(node)]
315 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
315 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
316 def parentrevs(self, rev):
316 def parentrevs(self, rev):
317 return self.index[rev][5:7]
317 return self.index[rev][5:7]
318 def start(self, rev):
318 def start(self, rev):
319 return int(self.index[rev][0] >> 16)
319 return int(self.index[rev][0] >> 16)
320 def end(self, rev):
320 def end(self, rev):
321 return self.start(rev) + self.length(rev)
321 return self.start(rev) + self.length(rev)
322 def length(self, rev):
322 def length(self, rev):
323 return self.index[rev][1]
323 return self.index[rev][1]
324 def chainbase(self, rev):
324 def chainbase(self, rev):
325 index = self.index
325 index = self.index
326 base = index[rev][3]
326 base = index[rev][3]
327 while base != rev:
327 while base != rev:
328 rev = base
328 rev = base
329 base = index[rev][3]
329 base = index[rev][3]
330 return base
330 return base
331 def flags(self, rev):
331 def flags(self, rev):
332 return self.index[rev][0] & 0xFFFF
332 return self.index[rev][0] & 0xFFFF
333 def rawsize(self, rev):
333 def rawsize(self, rev):
334 """return the length of the uncompressed text for a given revision"""
334 """return the length of the uncompressed text for a given revision"""
335 l = self.index[rev][2]
335 l = self.index[rev][2]
336 if l >= 0:
336 if l >= 0:
337 return l
337 return l
338
338
339 t = self.revision(self.node(rev))
339 t = self.revision(self.node(rev))
340 return len(t)
340 return len(t)
341 size = rawsize
341 size = rawsize
342
342
    def reachable(self, node, stop=None):
        """return the set of all nodes ancestral to a given node, including
        the node itself, stopping when stop is matched"""
        reachable = set((node,))
        visit = [node]
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        # breadth-first walk from node towards the roots
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                # prune parents older than the stop revision
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable.add(p)
                    visit.append(p)
        return reachable
365
365
366 def ancestors(self, *revs):
366 def ancestors(self, *revs):
367 """Generate the ancestors of 'revs' in reverse topological order.
367 """Generate the ancestors of 'revs' in reverse topological order.
368
368
369 Yield a sequence of revision numbers starting with the parents
369 Yield a sequence of revision numbers starting with the parents
370 of each revision in revs, i.e., each revision is *not* considered
370 of each revision in revs, i.e., each revision is *not* considered
371 an ancestor of itself. Results are in breadth-first order:
371 an ancestor of itself. Results are in breadth-first order:
372 parents of each rev in revs, then parents of those, etc. Result
372 parents of each rev in revs, then parents of those, etc. Result
373 does not include the null revision."""
373 does not include the null revision."""
374 visit = list(revs)
374 visit = list(revs)
375 seen = set([nullrev])
375 seen = set([nullrev])
376 while visit:
376 while visit:
377 for parent in self.parentrevs(visit.pop(0)):
377 for parent in self.parentrevs(visit.pop(0)):
378 if parent not in seen:
378 if parent not in seen:
379 visit.append(parent)
379 visit.append(parent)
380 seen.add(parent)
380 seen.add(parent)
381 yield parent
381 yield parent
382
382
383 def descendants(self, *revs):
383 def descendants(self, *revs):
384 """Generate the descendants of 'revs' in revision order.
384 """Generate the descendants of 'revs' in revision order.
385
385
386 Yield a sequence of revision numbers starting with a child of
386 Yield a sequence of revision numbers starting with a child of
387 some rev in revs, i.e., each revision is *not* considered a
387 some rev in revs, i.e., each revision is *not* considered a
388 descendant of itself. Results are ordered by revision number (a
388 descendant of itself. Results are ordered by revision number (a
389 topological sort)."""
389 topological sort)."""
390 first = min(revs)
390 first = min(revs)
391 if first == nullrev:
391 if first == nullrev:
392 for i in self:
392 for i in self:
393 yield i
393 yield i
394 return
394 return
395
395
396 seen = set(revs)
396 seen = set(revs)
397 for i in xrange(first + 1, len(self)):
397 for i in xrange(first + 1, len(self)):
398 for x in self.parentrevs(i):
398 for x in self.parentrevs(i):
399 if x != nullrev and x in seen:
399 if x != nullrev and x in seen:
400 seen.add(i)
400 seen.add(i)
401 yield i
401 yield i
402 break
402 break
403
403
    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        # work with revision numbers from here on
        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        has = set(self.ancestors(*common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        # (breadth-first walk from the heads, stopping at common's ancestors)
        missing = set()
        visit = [r for r in heads if r not in has]
        while visit:
            r = visit.pop(0)
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(r) for r in missing]
445
445
446 def findmissing(self, common=None, heads=None):
446 def findmissing(self, common=None, heads=None):
447 """Return the ancestors of heads that are not ancestors of common.
447 """Return the ancestors of heads that are not ancestors of common.
448
448
449 More specifically, return a list of nodes N such that every N
449 More specifically, return a list of nodes N such that every N
450 satisfies the following constraints:
450 satisfies the following constraints:
451
451
452 1. N is an ancestor of some node in 'heads'
452 1. N is an ancestor of some node in 'heads'
453 2. N is not an ancestor of any node in 'common'
453 2. N is not an ancestor of any node in 'common'
454
454
455 The list is sorted by revision number, meaning it is
455 The list is sorted by revision number, meaning it is
456 topologically sorted.
456 topologically sorted.
457
457
458 'heads' and 'common' are both lists of node IDs. If heads is
458 'heads' and 'common' are both lists of node IDs. If heads is
459 not supplied, uses all of the revlog's heads. If common is not
459 not supplied, uses all of the revlog's heads. If common is not
460 supplied, uses nullid."""
460 supplied, uses nullid."""
461 _common, missing = self.findcommonmissing(common, heads)
461 _common, missing = self.findcommonmissing(common, heads)
462 return missing
462 return missing
463
463
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'.  Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs.  If 'roots' is
        unspecified, uses nullid as the only root.  If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants.  (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        # keep only heads that were actually reached from the roots
        heads = [n for n, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
618
618
619 def headrevs(self):
619 def headrevs(self):
620 count = len(self)
620 count = len(self)
621 if not count:
621 if not count:
622 return [nullrev]
622 return [nullrev]
623 ishead = [1] * (count + 1)
623 ishead = [1] * (count + 1)
624 index = self.index
624 index = self.index
625 for r in xrange(count):
625 for r in xrange(count):
626 e = index[r]
626 e = index[r]
627 ishead[e[5]] = ishead[e[6]] = 0
627 ishead[e[5]] = ishead[e[6]] = 0
628 return [r for r in xrange(count) if ishead[r]]
628 return [r for r in xrange(count) if ishead[r]]
629
629
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            # common case: delegate to the rev-based implementation
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = set((startrev,))
        heads = set((startrev,))

        parentrevs = self.parentrevs
        # single forward pass: grow the reachable set from start and
        # retract heads whose children turn out to be reachable
        for r in xrange(startrev + 1, len(self)):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    # a reachable parent with a child is not a head
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]
663
663
664 def children(self, node):
664 def children(self, node):
665 """find the children of a given node"""
665 """find the children of a given node"""
666 c = []
666 c = []
667 p = self.rev(node)
667 p = self.rev(node)
668 for r in range(p + 1, len(self)):
668 for r in range(p + 1, len(self)):
669 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
669 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
670 if prevs:
670 if prevs:
671 for pr in prevs:
671 for pr in prevs:
672 if pr == p:
672 if pr == p:
673 c.append(self.node(r))
673 c.append(self.node(r))
674 elif p == nullrev:
674 elif p == nullrev:
675 c.append(self.node(r))
675 c.append(self.node(r))
676 return c
676 return c
677
677
678 def descendant(self, start, end):
678 def descendant(self, start, end):
679 if start == nullrev:
679 if start == nullrev:
680 return True
680 return True
681 for i in self.descendants(start):
681 for i in self.descendants(start):
682 if i == end:
682 if i == end:
683 return True
683 return True
684 elif i > end:
684 elif i > end:
685 break
685 break
686 return False
686 return False
687
687
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        # fast path, check if it is a descendant
        a, b = self.rev(a), self.rev(b)
        start, end = sorted((a, b))
        if self.descendant(start, end):
            # one revision is an ancestor of the other; it is the LCA
            return self.node(start)

        def parents(rev):
            # parent-fetching callback for the generic ancestor algorithm
            return [p for p in self.parentrevs(rev) if p != nullrev]

        c = ancestor.ancestor(a, b, parents)
        if c is None:
            # no common ancestor other than the null revision
            return nullid

        return self.node(c)
705
705
    def _match(self, id):
        """Try to resolve *id* to a node by exact means only.

        Accepts, in order of attempt: a revision number, a 20-byte
        binary node, the decimal string form of a revision number
        (negative counts from the tip), or a 40-character hex node.
        Returns None (implicitly) when nothing matches exactly.
        """
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            # reject things like '01' or '1.0' that int() would accept
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass
739
739
    def _partialmatch(self, id):
        """Resolve a hex-prefix *id* to a unique node.

        Returns the node on a unique match, None when nothing matches,
        and raises LookupError when the prefix is ambiguous.  Successful
        lookups are memoized in _pcache.
        """
        if id in self._pcache:
            return self._pcache[id]

        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2  # grab an even number of digits
                prefix = bin(id[:l * 2])
                # scan the whole index for nodes with this binary prefix
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                # refine with the odd trailing hex digit, if any
                nl = [n for n in nl if hex(n).startswith(id)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                # bin() failed: id was not valid hex
                pass
760
760
761 def lookup(self, id):
761 def lookup(self, id):
762 """locate a node based on:
762 """locate a node based on:
763 - revision number or str(revision number)
763 - revision number or str(revision number)
764 - nodeid or subset of hex nodeid
764 - nodeid or subset of hex nodeid
765 """
765 """
766 n = self._match(id)
766 n = self._match(id)
767 if n is not None:
767 if n is not None:
768 return n
768 return n
769 n = self._partialmatch(id)
769 n = self._partialmatch(id)
770 if n:
770 if n:
771 return n
771 return n
772
772
773 raise LookupError(id, self.indexfile, _('no match found'))
773 raise LookupError(id, self.indexfile, _('no match found'))
774
774
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        # compare by hashing text against the stored parents instead of
        # reconstructing the stored text; 'hash' here is the module-level
        # revlog hash function (3 args), not the builtin
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node
782
782
783 def _addchunk(self, offset, data):
783 def _addchunk(self, offset, data):
784 o, d = self._chunkcache
784 o, d = self._chunkcache
785 # try to add to existing cache
785 # try to add to existing cache
786 if o + len(d) == offset and len(d) + len(data) < _chunksize:
786 if o + len(d) == offset and len(d) + len(data) < _chunksize:
787 self._chunkcache = o, d + data
787 self._chunkcache = o, d + data
788 else:
788 else:
789 self._chunkcache = offset, data
789 self._chunkcache = offset, data
790
790
791 def _loadchunk(self, offset, length):
791 def _loadchunk(self, offset, length):
792 if self._inline:
792 if self._inline:
793 df = self.opener(self.indexfile)
793 df = self.opener(self.indexfile)
794 else:
794 else:
795 df = self.opener(self.datafile)
795 df = self.opener(self.datafile)
796
796
797 readahead = max(65536, length)
797 readahead = max(65536, length)
798 df.seek(offset)
798 df.seek(offset)
799 d = df.read(readahead)
799 d = df.read(readahead)
800 df.close()
800 df.close()
801 self._addchunk(offset, d)
801 self._addchunk(offset, d)
802 if readahead > length:
802 if readahead > length:
803 return d[:length]
803 return d[:length]
804 return d
804 return d
805
805
806 def _getchunk(self, offset, length):
806 def _getchunk(self, offset, length):
807 o, d = self._chunkcache
807 o, d = self._chunkcache
808 l = len(d)
808 l = len(d)
809
809
810 # is it in the cache?
810 # is it in the cache?
811 cachestart = offset - o
811 cachestart = offset - o
812 cacheend = cachestart + length
812 cacheend = cachestart + length
813 if cachestart >= 0 and cacheend <= l:
813 if cachestart >= 0 and cacheend <= l:
814 if cachestart == 0 and cacheend == l:
814 if cachestart == 0 and cacheend == l:
815 return d # avoid a copy
815 return d # avoid a copy
816 return d[cachestart:cacheend]
816 return d[cachestart:cacheend]
817
817
818 return self._loadchunk(offset, length)
818 return self._loadchunk(offset, length)
819
819
820 def _chunkraw(self, startrev, endrev):
820 def _chunkraw(self, startrev, endrev):
821 start = self.start(startrev)
821 start = self.start(startrev)
822 length = self.end(endrev) - start
822 length = self.end(endrev) - start
823 if self._inline:
823 if self._inline:
824 start += (startrev + 1) * self._io.size
824 start += (startrev + 1) * self._io.size
825 return self._getchunk(start, length)
825 return self._getchunk(start, length)
826
826
def _chunk(self, rev):
    """Return the decompressed chunk for a single revision."""
    return decompress(self._chunkraw(rev, rev))
829
829
830 def _chunkbase(self, rev):
830 def _chunkbase(self, rev):
831 return self._chunk(rev)
831 return self._chunk(rev)
832
832
833 def _chunkclear(self):
833 def _chunkclear(self):
834 self._chunkcache = (0, '')
834 self._chunkcache = (0, '')
835
835
def deltaparent(self, rev):
    """return deltaparent of the given revision"""
    base = self.index[rev][3]
    if base == rev:
        # full snapshot: no delta parent
        return nullrev
    elif self._generaldelta:
        # generaldelta: the base revision is recorded explicitly
        return base
    else:
        # classic revlog: deltas always chain to the previous revision
        return rev - 1
845
845
def revdiff(self, rev1, rev2):
    """return or calculate a delta between two revisions"""
    # if rev2's stored delta is already against rev1, reuse it as-is
    if rev1 != nullrev and self.deltaparent(rev2) == rev1:
        return self._chunk(rev2)

    return mdiff.textdiff(self.revision(self.node(rev1)),
                          self.revision(self.node(rev2)))
853
853
def revision(self, node):
    """return an uncompressed revision of a given node"""
    cachedrev = None
    if node == nullid:
        return ""
    if self._cache:
        if self._cache[0] == node:
            return self._cache[2]
        cachedrev = self._cache[1]

    # look up what we need to read
    text = None
    rev = self.rev(node)

    # check rev flags
    if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
        raise RevlogError(_('incompatible revision flag %x') %
                          (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

    # build delta chain, walking back until we hit the chain base or a
    # revision whose text we already have cached
    chain = []
    index = self.index # for performance
    generaldelta = self._generaldelta
    iterrev = rev
    e = index[iterrev]
    while iterrev != e[3] and iterrev != cachedrev:
        chain.append(iterrev)
        if generaldelta:
            iterrev = e[3]
        else:
            iterrev -= 1
        e = index[iterrev]
    chain.reverse()
    base = iterrev

    if iterrev == cachedrev:
        # cache hit
        text = self._cache[2]

    # drop cache to save memory
    self._cache = None

    # warm the chunk cache for the whole chain in one read
    self._chunkraw(base, rev)
    if text is None:
        text = self._chunkbase(base)

    bins = [self._chunk(r) for r in chain]
    text = mdiff.patches(text, bins)

    text = self._checkhash(text, node, rev)

    self._cache = (node, rev, text)
    return text
907
907
def _checkhash(self, text, node, rev):
    """Verify that *text* hashes to *node*; return text or raise."""
    p1, p2 = self.parents(node)
    if node != hash(text, p1, p2):
        raise RevlogError(_("integrity check failed on %s:%d")
                          % (self.indexfile, rev))
    return text
914
914
def checkinlinesize(self, tr, fp=None):
    """Migrate an inline revlog to separate index/data files when it
    grows past _maxinline.  No-op for non-inline or small revlogs."""
    if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
        return

    trinfo = tr.find(self.indexfile)
    if trinfo is None:
        raise RevlogError(_("%s not found in the transaction")
                          % self.indexfile)

    trindex = trinfo[2]
    dataoff = self.start(trindex)

    tr.add(self.datafile, dataoff)

    if fp:
        fp.flush()
        fp.close()

    # copy all revision data out into the new data file
    df = self.opener(self.datafile, 'w')
    try:
        for r in self:
            df.write(self._chunkraw(r, r))
    finally:
        df.close()

    # rewrite the index without the inline flag / inline data
    fp = self.opener(self.indexfile, 'w', atomictemp=True)
    self.version &= ~(REVLOGNGINLINEDATA)
    self._inline = False
    for i in self:
        e = self._io.packentry(self.index[i], self.node, self.version, i)
        fp.write(e)

    # if we don't call close, the temp file will never replace the
    # real index
    fp.close()

    tr.replace(self.indexfile, trindex * self._io.size)
    self._chunkclear()
953
953
def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
    """add a revision to the log

    text - the revision data to add
    transaction - the transaction object used for rollback
    link - the linkrev data to add
    p1, p2 - the parent nodeids of the revision
    cachedelta - an optional precomputed delta
    """
    node = hash(text, p1, p2)
    if node in self.nodemap:
        # already stored; adding is idempotent
        return node

    dfh = None
    if not self._inline:
        dfh = self.opener(self.datafile, "a")
    ifh = self.opener(self.indexfile, "a+")
    try:
        return self._addrevision(node, text, transaction, link, p1, p2,
                                 cachedelta, ifh, dfh)
    finally:
        if dfh:
            dfh.close()
        ifh.close()
978
978
def _addrevision(self, node, text, transaction, link, p1, p2,
                 cachedelta, ifh, dfh):
    """internal function to add revisions to the log

    see addrevision for argument descriptions.
    invariants:
    - text is optional (can be None); if not set, cachedelta must be set.
      if both are set, they must correspond to eachother.
    """
    btext = [text]
    def buildtext():
        # lazily materialize the full text (applying cachedelta if needed)
        if btext[0] is not None:
            return btext[0]
        # flush any pending writes here so we can read it in revision
        if dfh:
            dfh.flush()
        ifh.flush()
        basetext = self.revision(self.node(cachedelta[0]))
        btext[0] = mdiff.patch(basetext, cachedelta[1])
        chk = hash(btext[0], p1, p2)
        if chk != node:
            raise RevlogError(_("consistency error in delta"))
        return btext[0]

    def builddelta(rev):
        # can we use the cached delta?
        if cachedelta and cachedelta[0] == rev:
            delta = cachedelta[1]
        else:
            t = buildtext()
            ptext = self.revision(self.node(rev))
            delta = mdiff.textdiff(ptext, t)
        data = compress(delta)
        l = len(data[1]) + len(data[0])
        if basecache[0] == rev:
            chainbase = basecache[1]
        else:
            chainbase = self.chainbase(rev)
        dist = l + offset - self.start(chainbase)
        if self._generaldelta:
            base = rev
        else:
            base = chainbase
        return dist, l, data, base, chainbase

    curr = len(self)
    prev = curr - 1
    base = chainbase = curr
    offset = self.end(prev)
    flags = 0
    d = None
    basecache = self._basecache
    p1r, p2r = self.rev(p1), self.rev(p2)

    # should we try to build a delta?
    if prev != nullrev:
        if self._generaldelta:
            if p1r >= basecache[1]:
                d = builddelta(p1r)
            elif p2r >= basecache[1]:
                d = builddelta(p2r)
            else:
                d = builddelta(prev)
        else:
            d = builddelta(prev)
        dist, l, data, base, chainbase = d

    # full versions are inserted when the needed deltas
    # become comparable to the uncompressed text
    if text is None:
        textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
                                    cachedelta[1])
    else:
        textlen = len(text)
    if d is None or dist > textlen * 2:
        text = buildtext()
        data = compress(text)
        l = len(data[1]) + len(data[0])
        base = chainbase = curr

    e = (offset_type(offset, flags), l, textlen,
         base, link, p1r, p2r, node)
    self.index.insert(-1, e)
    self.nodemap[node] = curr

    entry = self._io.packentry(e, self.node, self.version, curr)
    if not self._inline:
        transaction.add(self.datafile, offset)
        transaction.add(self.indexfile, curr * len(entry))
        if data[0]:
            dfh.write(data[0])
        dfh.write(data[1])
        dfh.flush()
        ifh.write(entry)
    else:
        offset += curr * self._io.size
        transaction.add(self.indexfile, offset, curr)
        ifh.write(entry)
        ifh.write(data[0])
        ifh.write(data[1])
        self.checkinlinesize(transaction, ifh)

    if type(text) == str: # only accept immutable objects
        self._cache = (node, curr, text)
    self._basecache = (curr, chainbase)
    return node
1085
1085
def group(self, nodelist, bundler, reorder=None):
    """Calculate a delta group, yielding a sequence of changegroup chunks
    (strings).

    Given a list of changeset revs, return a set of deltas and
    metadata corresponding to nodes. The first delta is
    first parent(nodelist[0]) -> nodelist[0], the receiver is
    guaranteed to have this parent as it has all history before
    these changesets. In the case firstparent is nullrev the
    changegroup starts with a full revision.
    """

    # if we don't have any revisions touched by these changesets, bail
    if len(nodelist) == 0:
        yield bundler.close()
        return

    # for generaldelta revlogs, we linearize the revs; this will both be
    # much quicker and generate a much smaller bundle
    if (self._generaldelta and reorder is not False) or reorder:
        dag = dagutil.revlogdag(self)
        revs = set(self.rev(n) for n in nodelist)
        revs = dag.linearize(revs)
    else:
        revs = sorted([self.rev(n) for n in nodelist])

    # add the parent of the first rev
    p = self.parentrevs(revs[0])[0]
    revs.insert(0, p)

    # build deltas
    for r in xrange(len(revs) - 1):
        prev, curr = revs[r], revs[r + 1]
        for c in bundler.revchunk(self, curr, prev):
            yield c

    yield bundler.close()
1123
1123
def addgroup(self, bundle, linkmapper, transaction):
    """
    add a delta group

    given a set of deltas, add them to the revision log. the
    first delta is against its parent, which should be in our
    log, the rest are against the previous delta.

    Returns the list of nodes contained in the added source, in
    bundle order (nodes already present in the revlog are included
    too, since they were part of the group).
    """

    # track the nodes seen in this group and the base of the current
    # delta chain
    content = []
    node = None

    r = len(self)
    end = 0
    if r:
        end = self.end(r - 1)
    ifh = self.opener(self.indexfile, "a+")
    isize = r * self._io.size
    if self._inline:
        transaction.add(self.indexfile, end + isize, r)
        dfh = None
    else:
        transaction.add(self.indexfile, isize, r)
        transaction.add(self.datafile, end)
        dfh = self.opener(self.datafile, "a")

    try:
        # loop through our set of deltas
        chain = None
        while True:
            chunkdata = bundle.deltachunk(chain)
            if not chunkdata:
                break
            node = chunkdata['node']
            p1 = chunkdata['p1']
            p2 = chunkdata['p2']
            cs = chunkdata['cs']
            deltabase = chunkdata['deltabase']
            delta = chunkdata['delta']

            content.append(node)

            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                chain = node
                continue

            for p in (p1, p2):
                if not p in self.nodemap:
                    raise LookupError(p, self.indexfile,
                                      _('unknown parent'))

            if deltabase not in self.nodemap:
                raise LookupError(deltabase, self.indexfile,
                                  _('unknown delta base'))

            baserev = self.rev(deltabase)
            chain = self._addrevision(node, None, transaction, link,
                                      p1, p2, (baserev, delta), ifh, dfh)
            if not dfh and not self._inline:
                # addrevision switched from inline to conventional
                # reopen the index
                ifh.close()
                dfh = self.opener(self.datafile, "a")
                ifh = self.opener(self.indexfile, "a")
    finally:
        if dfh:
            dfh.close()
        ifh.close()

    return content
1194
1197
def strip(self, minlink, transaction):
    """truncate the revlog on the first revision with a linkrev >= minlink

    This function is called when we're stripping revision minlink and
    its descendants from the repository.

    We have to remove all revisions with linkrev >= minlink, because
    the equivalent changelog revisions will be renumbered after the
    strip.

    So we truncate the revlog on the first of these revisions, and
    trust that the caller has saved the revisions that shouldn't be
    removed and that it'll re-add them after this truncation.
    """
    if len(self) == 0:
        return

    # find the first revision whose linkrev is >= minlink
    for rev in self:
        if self.index[rev][4] >= minlink:
            break
    else:
        return

    # first truncate the files on disk
    end = self.start(rev)
    if not self._inline:
        transaction.add(self.datafile, end)
        end = rev * self._io.size
    else:
        end += rev * self._io.size

    transaction.add(self.indexfile, end)

    # then reset internal state in memory to forget those revisions
    self._cache = None
    self._chunkclear()
    for x in xrange(rev, len(self)):
        del self.nodemap[self.node(x)]

    del self.index[rev:-1]
1235
1238
def checksize(self):
    """Return (dd, di): byte-count discrepancies between the expected
    and actual sizes of the data and index files (0 means consistent)."""
    expected = 0
    if len(self):
        expected = max(0, self.end(len(self) - 1))

    try:
        f = self.opener(self.datafile)
        f.seek(0, 2)
        actual = f.tell()
        f.close()
        dd = actual - expected
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        # no data file at all is fine for inline/empty revlogs
        dd = 0

    try:
        f = self.opener(self.indexfile)
        f.seek(0, 2)
        actual = f.tell()
        f.close()
        s = self._io.size
        i = max(0, actual // s)
        di = actual - (i * s)
        if self._inline:
            databytes = 0
            for r in self:
                databytes += max(0, self.length(r))
            dd = 0
            di = actual - len(self) * s - databytes
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        di = 0

    return (dd, di)
1272
1275
def files(self):
    """Return the list of on-disk files backing this revlog."""
    res = [self.indexfile]
    if not self._inline:
        # non-inline revlogs keep revision data in a separate file
        res.append(self.datafile)
    return res
General Comments 0
You need to be logged in to leave comments. Login now