revlog: linearize created changegroups in generaldelta revlogs...
Sune Foldager
r14365:a8e3931e default
@@ -1,1975 +1,1986 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener.read("requires").splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RequirementError(
                    _("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath"))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

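    # Illustrative sketch (not part of the original change): the
    # `.hg/requires` file written above is a plain list of feature names,
    # one per line, for example:
    #
    #     revlogv1
    #     store
    #     fncache
    #     dotencode
    #     generaldelta
    #
    # _applyrequirements() then exposes the subset relevant to revlogs
    # ('revlogv1', 'generaldelta') via self.sopener.options, which is how
    # revlog instances learn that generaldelta deltas may be stored.
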
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

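    # A hedged usage sketch (paths hypothetical): with a subrepository
    # declared at 'sub', the path auditor calls back into _checknested()
    # to decide whether an absolute path under the root is reachable
    # through declared subrepositories:
    #
    #     repo._checknested(repo.wjoin('sub'))        # True if 'sub' is in
    #                                                 # the working substate
    #     repo._checknested(repo.wjoin('stray/.hg'))  # False: not declared
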
    @util.propertycache
    def _bookmarks(self):
        return bookmarks.read(self)

    @util.propertycache
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

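    # The container protocol above gives the conventional access patterns;
    # a brief sketch (assuming an existing `repo` instance):
    #
    #     len(repo)           # number of changesets (length of the changelog)
    #     repo[0]             # changectx for revision 0
    #     repo[None]          # workingctx for the working directory
    #     'deadbeef' in repo  # True if the id resolves via lookup()
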
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

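    # A minimal usage sketch for the tagging API (values hypothetical):
    #
    #     repo.tag(['v1.0'], repo.lookup('tip'), 'Added tag v1.0',
    #              local=False, user='Jane Doe <jane@example.com>',
    #              date=None)
    #
    # With local=False this appends to .hgtags and commits the change;
    # with local=True it only appends to .hg/localtags.
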
    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {}  # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            r = self.changelog.rev(n)
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

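    # Shape of the data returned above, as a sketch: branchmap() maps each
    # branch name to its list of head nodes (tipmost last), while
    # branchtags() reduces that to one head per branch, preferring open
    # heads over closed ones:
    #
    #     {'default': [node1, node2], 'stable': [node3]}   # branchmap()
    #     {'default': node2, 'stable': node3}              # branchtags()
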
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

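    # Sketch of the on-disk format handled by _readbranchcache and
    # _writebranchcache (.hg/cache/branchheads): the first line records
    # the tip node and revision the cache is valid for, followed by one
    # "<node> <branch>" line per head (hashes elided here):
    #
    #     a8e3931e... 14365
    #     a8e3931e... default
    #     1c28f494... stable
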
    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

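    # lookup() resolves identifiers in a fixed order callers can rely on:
    # integer revisions, the special names '.', 'null' and 'tip', exact
    # node matches, bookmarks, tags, branch names, and finally unambiguous
    # hex prefixes. A sketch (values hypothetical):
    #
    #     repo.lookup(0)        # binary node of revision 0
    #     repo.lookup('tip')    # binary node of the tip changeset
    #     repo.lookup('a8e39')  # prefix match via _partialmatch
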
    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        return [(n in nm) for n in nodes]

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

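    # The filter machinery above is driven by [encode]/[decode] hgrc
    # sections; a hedged configuration example (commands hypothetical):
    #
    #     [encode]
    #     *.txt = dos2unix
    #     [decode]
    #     *.txt = unix2dos
    #
    # A pattern mapped to '!' is skipped, and a command prefixed by a name
    # registered via adddatafilter() uses that Python filter instead of
    # shelling out through util.filter().
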
    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

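    # Canonical calling pattern for the transaction API, as a sketch:
    #
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction('example')
    #         try:
    #             # ... append to revlogs through tr ...
    #             tr.close()
    #         finally:
    #             tr.release()
    #     finally:
    #         lock.release()
    #
    # The journal written by _writejournal() is what recover() uses to
    # roll back a transaction that died between close and release.
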
    def _writejournal(self, desc):
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    args = self.opener.read("undo.desc").splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                try:
                    branch = self.opener.read("undo.branch")
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("named branch could not be reset, "
                                   "current branch is still: %s\n")
                                 % self.dirstate.branch())
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
                parents = tuple([p.rev() for p in self.parents()])
                if len(parents) > 1:
                    self.ui.status(_("working directory now based on "
                                     "revisions %d and %d\n") % parents)
                else:
                    self.ui.status(_("working directory now based on "
                                     "revision %d\n") % parents)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

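    # After a successful transaction, the journal.* files are renamed to
    # undo.* (via the undoname/aftertrans helpers referenced in
    # transaction() above); rollback() then consumes undo, undo.dirstate,
    # undo.branch, undo.desc and undo.bookmarks. undo.desc holds two
    # lines, e.g. "14365\ncommit\n": the changelog length before the
    # transaction and the operation name.
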
    def invalidatecaches(self):
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidate(self):
        for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
            if a in self.__dict__:
                delattr(self, a)
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, self.store.write,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

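    # Lock-ordering sketch: when both locks are needed, take wlock before
    # lock and release in reverse, exactly as rollback() does above:
    #
    #     wlock = repo.wlock()
    #     try:
    #         lock = repo.lock()
    #         try:
    #             # ... modify store and working state ...
    #             pass
    #         finally:
    #             lock.release()
    #     finally:
    #         wlock.release()
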
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
974 for f in match.files():
975 if f == '.' or f in matched or f in wctx.substate:
975 if f == '.' or f in matched or f in wctx.substate:
976 continue
976 continue
977 if f in changes[3]: # missing
977 if f in changes[3]: # missing
978 fail(f, _('file not found!'))
978 fail(f, _('file not found!'))
979 if f in vdirs: # visited directory
979 if f in vdirs: # visited directory
980 d = f + '/'
980 d = f + '/'
981 for mf in matched:
981 for mf in matched:
982 if mf.startswith(d):
982 if mf.startswith(d):
983 break
983 break
984 else:
984 else:
985 fail(f, _("no match under directory!"))
985 fail(f, _("no match under directory!"))
986 elif f not in self.dirstate:
986 elif f not in self.dirstate:
987 fail(f, _("file not tracked!"))
987 fail(f, _("file not tracked!"))
988
988
989 if (not force and not extra.get("close") and not merge
989 if (not force and not extra.get("close") and not merge
990 and not (changes[0] or changes[1] or changes[2])
990 and not (changes[0] or changes[1] or changes[2])
991 and wctx.branch() == wctx.p1().branch()):
991 and wctx.branch() == wctx.p1().branch()):
992 return None
992 return None
993
993
994 ms = mergemod.mergestate(self)
994 ms = mergemod.mergestate(self)
995 for f in changes[0]:
995 for f in changes[0]:
996 if f in ms and ms[f] == 'u':
996 if f in ms and ms[f] == 'u':
997 raise util.Abort(_("unresolved merge conflicts "
997 raise util.Abort(_("unresolved merge conflicts "
998 "(see hg help resolve)"))
998 "(see hg help resolve)"))
999
999
1000 cctx = context.workingctx(self, text, user, date, extra, changes)
1000 cctx = context.workingctx(self, text, user, date, extra, changes)
1001 if editor:
1001 if editor:
1002 cctx._text = editor(self, cctx, subs)
1002 cctx._text = editor(self, cctx, subs)
1003 edited = (text != cctx._text)
1003 edited = (text != cctx._text)
1004
1004
1005 # commit subs
1005 # commit subs
1006 if subs or removedsubs:
1006 if subs or removedsubs:
1007 state = wctx.substate.copy()
1007 state = wctx.substate.copy()
1008 for s in sorted(subs):
1008 for s in sorted(subs):
1009 sub = wctx.sub(s)
1009 sub = wctx.sub(s)
1010 self.ui.status(_('committing subrepository %s\n') %
1010 self.ui.status(_('committing subrepository %s\n') %
1011 subrepo.subrelpath(sub))
1011 subrepo.subrelpath(sub))
1012 sr = sub.commit(cctx._text, user, date)
1012 sr = sub.commit(cctx._text, user, date)
1013 state[s] = (state[s][0], sr)
1013 state[s] = (state[s][0], sr)
1014 subrepo.writestate(self, state)
1014 subrepo.writestate(self, state)
1015
1015
1016 # Save commit message in case this transaction gets rolled back
1016 # Save commit message in case this transaction gets rolled back
1017 # (e.g. by a pretxncommit hook). Leave the content alone on
1017 # (e.g. by a pretxncommit hook). Leave the content alone on
1018 # the assumption that the user will use the same editor again.
1018 # the assumption that the user will use the same editor again.
1019 msgfile = self.opener('last-message.txt', 'wb')
1019 msgfile = self.opener('last-message.txt', 'wb')
1020 msgfile.write(cctx._text)
1020 msgfile.write(cctx._text)
1021 msgfile.close()
1021 msgfile.close()
1022
1022
1023 p1, p2 = self.dirstate.parents()
1023 p1, p2 = self.dirstate.parents()
1024 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1024 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1025 try:
1025 try:
1026 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1026 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1027 ret = self.commitctx(cctx, True)
1027 ret = self.commitctx(cctx, True)
1028 except:
1028 except:
1029 if edited:
1029 if edited:
1030 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
1030 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
1031 self.ui.write(
1031 self.ui.write(
1032 _('note: commit message saved in %s\n') % msgfn)
1032 _('note: commit message saved in %s\n') % msgfn)
1033 raise
1033 raise
1034
1034
1035 # update bookmarks, dirstate and mergestate
1035 # update bookmarks, dirstate and mergestate
1036 bookmarks.update(self, p1, ret)
1036 bookmarks.update(self, p1, ret)
1037 for f in changes[0] + changes[1]:
1037 for f in changes[0] + changes[1]:
1038 self.dirstate.normal(f)
1038 self.dirstate.normal(f)
1039 for f in changes[2]:
1039 for f in changes[2]:
1040 self.dirstate.forget(f)
1040 self.dirstate.forget(f)
1041 self.dirstate.setparents(ret)
1041 self.dirstate.setparents(ret)
1042 ms.reset()
1042 ms.reset()
1043 finally:
1043 finally:
1044 wlock.release()
1044 wlock.release()
1045
1045
1046 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1046 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1047 return ret
1047 return ret
1048
1048
1049 def commitctx(self, ctx, error=False):
1049 def commitctx(self, ctx, error=False):
1050 """Add a new revision to current repository.
1050 """Add a new revision to current repository.
1051 Revision information is passed via the context argument.
1051 Revision information is passed via the context argument.
1052 """
1052 """
1053
1053
1054 tr = lock = None
1054 tr = lock = None
1055 removed = list(ctx.removed())
1055 removed = list(ctx.removed())
1056 p1, p2 = ctx.p1(), ctx.p2()
1056 p1, p2 = ctx.p1(), ctx.p2()
1057 user = ctx.user()
1057 user = ctx.user()
1058
1058
1059 lock = self.lock()
1059 lock = self.lock()
1060 try:
1060 try:
1061 tr = self.transaction("commit")
1061 tr = self.transaction("commit")
1062 trp = weakref.proxy(tr)
1062 trp = weakref.proxy(tr)
1063
1063
1064 if ctx.files():
1064 if ctx.files():
1065 m1 = p1.manifest().copy()
1065 m1 = p1.manifest().copy()
1066 m2 = p2.manifest()
1066 m2 = p2.manifest()
1067
1067
1068 # check in files
1068 # check in files
1069 new = {}
1069 new = {}
1070 changed = []
1070 changed = []
1071 linkrev = len(self)
1071 linkrev = len(self)
1072 for f in sorted(ctx.modified() + ctx.added()):
1072 for f in sorted(ctx.modified() + ctx.added()):
1073 self.ui.note(f + "\n")
1073 self.ui.note(f + "\n")
1074 try:
1074 try:
1075 fctx = ctx[f]
1075 fctx = ctx[f]
1076 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1076 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1077 changed)
1077 changed)
1078 m1.set(f, fctx.flags())
1078 m1.set(f, fctx.flags())
1079 except OSError, inst:
1079 except OSError, inst:
1080 self.ui.warn(_("trouble committing %s!\n") % f)
1080 self.ui.warn(_("trouble committing %s!\n") % f)
1081 raise
1081 raise
1082 except IOError, inst:
1082 except IOError, inst:
1083 errcode = getattr(inst, 'errno', errno.ENOENT)
1083 errcode = getattr(inst, 'errno', errno.ENOENT)
1084 if error or errcode and errcode != errno.ENOENT:
1084 if error or errcode and errcode != errno.ENOENT:
1085 self.ui.warn(_("trouble committing %s!\n") % f)
1085 self.ui.warn(_("trouble committing %s!\n") % f)
1086 raise
1086 raise
1087 else:
1087 else:
1088 removed.append(f)
1088 removed.append(f)
1089
1089
1090 # update manifest
1090 # update manifest
1091 m1.update(new)
1091 m1.update(new)
1092 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1092 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1093 drop = [f for f in removed if f in m1]
1093 drop = [f for f in removed if f in m1]
1094 for f in drop:
1094 for f in drop:
1095 del m1[f]
1095 del m1[f]
1096 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1096 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1097 p2.manifestnode(), (new, drop))
1097 p2.manifestnode(), (new, drop))
1098 files = changed + removed
1098 files = changed + removed
1099 else:
1099 else:
1100 mn = p1.manifestnode()
1100 mn = p1.manifestnode()
1101 files = []
1101 files = []
1102
1102
1103 # update changelog
1103 # update changelog
1104 self.changelog.delayupdate()
1104 self.changelog.delayupdate()
1105 n = self.changelog.add(mn, files, ctx.description(),
1105 n = self.changelog.add(mn, files, ctx.description(),
1106 trp, p1.node(), p2.node(),
1106 trp, p1.node(), p2.node(),
1107 user, ctx.date(), ctx.extra().copy())
1107 user, ctx.date(), ctx.extra().copy())
1108 p = lambda: self.changelog.writepending() and self.root or ""
1108 p = lambda: self.changelog.writepending() and self.root or ""
1109 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1109 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1110 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1110 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1111 parent2=xp2, pending=p)
1111 parent2=xp2, pending=p)
1112 self.changelog.finalize(trp)
1112 self.changelog.finalize(trp)
1113 tr.close()
1113 tr.close()
1114
1114
1115 if self._branchcache:
1115 if self._branchcache:
1116 self.updatebranchcache()
1116 self.updatebranchcache()
1117 return n
1117 return n
1118 finally:
1118 finally:
1119 if tr:
1119 if tr:
1120 tr.release()
1120 tr.release()
1121 lock.release()
1121 lock.release()
1122
1122
1123 def destroyed(self):
1123 def destroyed(self):
1124 '''Inform the repository that nodes have been destroyed.
1124 '''Inform the repository that nodes have been destroyed.
1125 Intended for use by strip and rollback, so there's a common
1125 Intended for use by strip and rollback, so there's a common
1126 place for anything that has to be done after destroying history.'''
1126 place for anything that has to be done after destroying history.'''
1127 # XXX it might be nice if we could take the list of destroyed
1127 # XXX it might be nice if we could take the list of destroyed
1128 # nodes, but I don't see an easy way for rollback() to do that
1128 # nodes, but I don't see an easy way for rollback() to do that
1129
1129
1130 # Ensure the persistent tag cache is updated. Doing it now
1130 # Ensure the persistent tag cache is updated. Doing it now
1131 # means that the tag cache only has to worry about destroyed
1131 # means that the tag cache only has to worry about destroyed
1132 # heads immediately after a strip/rollback. That in turn
1132 # heads immediately after a strip/rollback. That in turn
1133 # guarantees that "cachetip == currenttip" (comparing both rev
1133 # guarantees that "cachetip == currenttip" (comparing both rev
1134 # and node) always means no nodes have been added or destroyed.
1134 # and node) always means no nodes have been added or destroyed.
1135
1135
1136 # XXX this is suboptimal when qrefresh'ing: we strip the current
1136 # XXX this is suboptimal when qrefresh'ing: we strip the current
1137 # head, refresh the tag cache, then immediately add a new head.
1137 # head, refresh the tag cache, then immediately add a new head.
1138 # But I think doing it this way is necessary for the "instant
1138 # But I think doing it this way is necessary for the "instant
1139 # tag cache retrieval" case to work.
1139 # tag cache retrieval" case to work.
1140 self.invalidatecaches()
1140 self.invalidatecaches()
1141
1141
1142 def walk(self, match, node=None):
1142 def walk(self, match, node=None):
1143 '''
1143 '''
1144 walk recursively through the directory tree or a given
1144 walk recursively through the directory tree or a given
1145 changeset, finding all files matched by the match
1145 changeset, finding all files matched by the match
1146 function
1146 function
1147 '''
1147 '''
1148 return self[node].walk(match)
1148 return self[node].walk(match)
1149
1149
1150 def status(self, node1='.', node2=None, match=None,
1150 def status(self, node1='.', node2=None, match=None,
1151 ignored=False, clean=False, unknown=False,
1151 ignored=False, clean=False, unknown=False,
1152 listsubrepos=False):
1152 listsubrepos=False):
1153 """return status of files between two nodes or node and working directory
1153 """return status of files between two nodes or node and working directory
1154
1154
1155 If node1 is None, use the first dirstate parent instead.
1155 If node1 is None, use the first dirstate parent instead.
1156 If node2 is None, compare node1 with working directory.
1156 If node2 is None, compare node1 with working directory.
1157 """
1157 """
1158
1158
1159 def mfmatches(ctx):
1159 def mfmatches(ctx):
1160 mf = ctx.manifest().copy()
1160 mf = ctx.manifest().copy()
1161 for fn in mf.keys():
1161 for fn in mf.keys():
1162 if not match(fn):
1162 if not match(fn):
1163 del mf[fn]
1163 del mf[fn]
1164 return mf
1164 return mf
1165
1165
1166 if isinstance(node1, context.changectx):
1166 if isinstance(node1, context.changectx):
1167 ctx1 = node1
1167 ctx1 = node1
1168 else:
1168 else:
1169 ctx1 = self[node1]
1169 ctx1 = self[node1]
1170 if isinstance(node2, context.changectx):
1170 if isinstance(node2, context.changectx):
1171 ctx2 = node2
1171 ctx2 = node2
1172 else:
1172 else:
1173 ctx2 = self[node2]
1173 ctx2 = self[node2]
1174
1174
1175 working = ctx2.rev() is None
1175 working = ctx2.rev() is None
1176 parentworking = working and ctx1 == self['.']
1176 parentworking = working and ctx1 == self['.']
1177 match = match or matchmod.always(self.root, self.getcwd())
1177 match = match or matchmod.always(self.root, self.getcwd())
1178 listignored, listclean, listunknown = ignored, clean, unknown
1178 listignored, listclean, listunknown = ignored, clean, unknown
1179
1179
1180 # load earliest manifest first for caching reasons
1180 # load earliest manifest first for caching reasons
1181 if not working and ctx2.rev() < ctx1.rev():
1181 if not working and ctx2.rev() < ctx1.rev():
1182 ctx2.manifest()
1182 ctx2.manifest()
1183
1183
1184 if not parentworking:
1184 if not parentworking:
1185 def bad(f, msg):
1185 def bad(f, msg):
1186 if f not in ctx1:
1186 if f not in ctx1:
1187 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1187 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1188 match.bad = bad
1188 match.bad = bad
1189
1189
1190 if working: # we need to scan the working dir
1190 if working: # we need to scan the working dir
1191 subrepos = []
1191 subrepos = []
1192 if '.hgsub' in self.dirstate:
1192 if '.hgsub' in self.dirstate:
1193 subrepos = ctx1.substate.keys()
1193 subrepos = ctx1.substate.keys()
1194 s = self.dirstate.status(match, subrepos, listignored,
1194 s = self.dirstate.status(match, subrepos, listignored,
1195 listclean, listunknown)
1195 listclean, listunknown)
1196 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1196 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1197
1197
1198 # check for any possibly clean files
1198 # check for any possibly clean files
1199 if parentworking and cmp:
1199 if parentworking and cmp:
1200 fixup = []
1200 fixup = []
1201 # do a full compare of any files that might have changed
1201 # do a full compare of any files that might have changed
1202 for f in sorted(cmp):
1202 for f in sorted(cmp):
1203 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1203 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1204 or ctx1[f].cmp(ctx2[f])):
1204 or ctx1[f].cmp(ctx2[f])):
1205 modified.append(f)
1205 modified.append(f)
1206 else:
1206 else:
1207 fixup.append(f)
1207 fixup.append(f)
1208
1208
1209 # update dirstate for files that are actually clean
1209 # update dirstate for files that are actually clean
1210 if fixup:
1210 if fixup:
1211 if listclean:
1211 if listclean:
1212 clean += fixup
1212 clean += fixup
1213
1213
1214 try:
1214 try:
1215 # updating the dirstate is optional
1215 # updating the dirstate is optional
1216 # so we don't wait on the lock
1216 # so we don't wait on the lock
1217 wlock = self.wlock(False)
1217 wlock = self.wlock(False)
1218 try:
1218 try:
1219 for f in fixup:
1219 for f in fixup:
1220 self.dirstate.normal(f)
1220 self.dirstate.normal(f)
1221 finally:
1221 finally:
1222 wlock.release()
1222 wlock.release()
1223 except error.LockError:
1223 except error.LockError:
1224 pass
1224 pass
1225
1225
1226 if not parentworking:
1226 if not parentworking:
1227 mf1 = mfmatches(ctx1)
1227 mf1 = mfmatches(ctx1)
1228 if working:
1228 if working:
1229 # we are comparing working dir against non-parent
1229 # we are comparing working dir against non-parent
1230 # generate a pseudo-manifest for the working dir
1230 # generate a pseudo-manifest for the working dir
1231 mf2 = mfmatches(self['.'])
1231 mf2 = mfmatches(self['.'])
1232 for f in cmp + modified + added:
1232 for f in cmp + modified + added:
1233 mf2[f] = None
1233 mf2[f] = None
1234 mf2.set(f, ctx2.flags(f))
1234 mf2.set(f, ctx2.flags(f))
1235 for f in removed:
1235 for f in removed:
1236 if f in mf2:
1236 if f in mf2:
1237 del mf2[f]
1237 del mf2[f]
1238 else:
1238 else:
1239 # we are comparing two revisions
1239 # we are comparing two revisions
1240 deleted, unknown, ignored = [], [], []
1240 deleted, unknown, ignored = [], [], []
1241 mf2 = mfmatches(ctx2)
1241 mf2 = mfmatches(ctx2)
1242
1242
1243 modified, added, clean = [], [], []
1243 modified, added, clean = [], [], []
1244 for fn in mf2:
1244 for fn in mf2:
1245 if fn in mf1:
1245 if fn in mf1:
1246 if (fn not in deleted and
1246 if (fn not in deleted and
1247 (mf1.flags(fn) != mf2.flags(fn) or
1247 (mf1.flags(fn) != mf2.flags(fn) or
1248 (mf1[fn] != mf2[fn] and
1248 (mf1[fn] != mf2[fn] and
1249 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1249 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1250 modified.append(fn)
1250 modified.append(fn)
1251 elif listclean:
1251 elif listclean:
1252 clean.append(fn)
1252 clean.append(fn)
1253 del mf1[fn]
1253 del mf1[fn]
1254 elif fn not in deleted:
1254 elif fn not in deleted:
1255 added.append(fn)
1255 added.append(fn)
1256 removed = mf1.keys()
1256 removed = mf1.keys()
1257
1257
1258 r = modified, added, removed, deleted, unknown, ignored, clean
1258 r = modified, added, removed, deleted, unknown, ignored, clean
1259
1259
1260 if listsubrepos:
1260 if listsubrepos:
1261 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1261 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1262 if working:
1262 if working:
1263 rev2 = None
1263 rev2 = None
1264 else:
1264 else:
1265 rev2 = ctx2.substate[subpath][1]
1265 rev2 = ctx2.substate[subpath][1]
1266 try:
1266 try:
1267 submatch = matchmod.narrowmatcher(subpath, match)
1267 submatch = matchmod.narrowmatcher(subpath, match)
1268 s = sub.status(rev2, match=submatch, ignored=listignored,
1268 s = sub.status(rev2, match=submatch, ignored=listignored,
1269 clean=listclean, unknown=listunknown,
1269 clean=listclean, unknown=listunknown,
1270 listsubrepos=True)
1270 listsubrepos=True)
1271 for rfiles, sfiles in zip(r, s):
1271 for rfiles, sfiles in zip(r, s):
1272 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1272 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1273 except error.LookupError:
1273 except error.LookupError:
1274 self.ui.status(_("skipping missing subrepository: %s\n")
1274 self.ui.status(_("skipping missing subrepository: %s\n")
1275 % subpath)
1275 % subpath)
1276
1276
1277 for l in r:
1277 for l in r:
1278 l.sort()
1278 l.sort()
1279 return r
1279 return r
1280
1280
1281 def heads(self, start=None):
1281 def heads(self, start=None):
1282 heads = self.changelog.heads(start)
1282 heads = self.changelog.heads(start)
1283 # sort the output in rev descending order
1283 # sort the output in rev descending order
1284 return sorted(heads, key=self.changelog.rev, reverse=True)
1284 return sorted(heads, key=self.changelog.rev, reverse=True)
1285
1285
1286 def branchheads(self, branch=None, start=None, closed=False):
1286 def branchheads(self, branch=None, start=None, closed=False):
1287 '''return a (possibly filtered) list of heads for the given branch
1287 '''return a (possibly filtered) list of heads for the given branch
1288
1288
1289 Heads are returned in topological order, from newest to oldest.
1289 Heads are returned in topological order, from newest to oldest.
1290 If branch is None, use the dirstate branch.
1290 If branch is None, use the dirstate branch.
1291 If start is not None, return only heads reachable from start.
1291 If start is not None, return only heads reachable from start.
1292 If closed is True, return heads that are marked as closed as well.
1292 If closed is True, return heads that are marked as closed as well.
1293 '''
1293 '''
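# illustrative call (example values, not from the original source):
#   repo.branchheads('default', closed=True)
# returns the heads of the 'default' branch, closed ones included,
# newest first.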
1294 if branch is None:
1294 if branch is None:
1295 branch = self[None].branch()
1295 branch = self[None].branch()
1296 branches = self.branchmap()
1296 branches = self.branchmap()
1297 if branch not in branches:
1297 if branch not in branches:
1298 return []
1298 return []
1299 # the cache returns heads ordered lowest to highest
1299 # the cache returns heads ordered lowest to highest
1300 bheads = list(reversed(branches[branch]))
1300 bheads = list(reversed(branches[branch]))
1301 if start is not None:
1301 if start is not None:
1302 # filter out the heads that cannot be reached from startrev
1302 # filter out the heads that cannot be reached from startrev
1303 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1303 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1304 bheads = [h for h in bheads if h in fbheads]
1304 bheads = [h for h in bheads if h in fbheads]
1305 if not closed:
1305 if not closed:
1306 bheads = [h for h in bheads if
1306 bheads = [h for h in bheads if
1307 ('close' not in self.changelog.read(h)[5])]
1307 ('close' not in self.changelog.read(h)[5])]
1308 return bheads
1308 return bheads
1309
1309
1310 def branches(self, nodes):
1310 def branches(self, nodes):
1311 if not nodes:
1311 if not nodes:
1312 nodes = [self.changelog.tip()]
1312 nodes = [self.changelog.tip()]
1313 b = []
1313 b = []
1314 for n in nodes:
1314 for n in nodes:
1315 t = n
1315 t = n
1316 while 1:
1316 while 1:
1317 p = self.changelog.parents(n)
1317 p = self.changelog.parents(n)
1318 if p[1] != nullid or p[0] == nullid:
1318 if p[1] != nullid or p[0] == nullid:
1319 b.append((t, n, p[0], p[1]))
1319 b.append((t, n, p[0], p[1]))
1320 break
1320 break
1321 n = p[0]
1321 n = p[0]
1322 return b
1322 return b
1323
1323
1324 def between(self, pairs):
1324 def between(self, pairs):
1325 r = []
1325 r = []
1326
1326
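# for each (top, bottom) pair, walk first parents from top towards
# bottom, recording the nodes whose distance from top is a power of
# two (i == f, with f doubling): an exponentially spaced sample of
# the path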
1327 for top, bottom in pairs:
1327 for top, bottom in pairs:
1328 n, l, i = top, [], 0
1328 n, l, i = top, [], 0
1329 f = 1
1329 f = 1
1330
1330
1331 while n != bottom and n != nullid:
1331 while n != bottom and n != nullid:
1332 p = self.changelog.parents(n)[0]
1332 p = self.changelog.parents(n)[0]
1333 if i == f:
1333 if i == f:
1334 l.append(n)
1334 l.append(n)
1335 f = f * 2
1335 f = f * 2
1336 n = p
1336 n = p
1337 i += 1
1337 i += 1
1338
1338
1339 r.append(l)
1339 r.append(l)
1340
1340
1341 return r
1341 return r
1342
1342
1343 def pull(self, remote, heads=None, force=False):
1343 def pull(self, remote, heads=None, force=False):
1344 lock = self.lock()
1344 lock = self.lock()
1345 try:
1345 try:
1346 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1346 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1347 force=force)
1347 force=force)
1348 common, fetch, rheads = tmp
1348 common, fetch, rheads = tmp
1349 if not fetch:
1349 if not fetch:
1350 self.ui.status(_("no changes found\n"))
1350 self.ui.status(_("no changes found\n"))
1351 result = 0
1351 result = 0
1352 else:
1352 else:
1353 if heads is None and list(common) == [nullid]:
1353 if heads is None and list(common) == [nullid]:
1354 self.ui.status(_("requesting all changes\n"))
1354 self.ui.status(_("requesting all changes\n"))
1355 elif heads is None and remote.capable('changegroupsubset'):
1355 elif heads is None and remote.capable('changegroupsubset'):
1356 # issue1320, avoid a race if remote changed after discovery
1356 # issue1320, avoid a race if remote changed after discovery
1357 heads = rheads
1357 heads = rheads
1358
1358
1359 if remote.capable('getbundle'):
1359 if remote.capable('getbundle'):
1360 cg = remote.getbundle('pull', common=common,
1360 cg = remote.getbundle('pull', common=common,
1361 heads=heads or rheads)
1361 heads=heads or rheads)
1362 elif heads is None:
1362 elif heads is None:
1363 cg = remote.changegroup(fetch, 'pull')
1363 cg = remote.changegroup(fetch, 'pull')
1364 elif not remote.capable('changegroupsubset'):
1364 elif not remote.capable('changegroupsubset'):
1365 raise util.Abort(_("partial pull cannot be done because "
1365 raise util.Abort(_("partial pull cannot be done because "
1366 "other repository doesn't support "
1366 "other repository doesn't support "
1367 "changegroupsubset."))
1367 "changegroupsubset."))
1368 else:
1368 else:
1369 cg = remote.changegroupsubset(fetch, heads, 'pull')
1369 cg = remote.changegroupsubset(fetch, heads, 'pull')
1370 result = self.addchangegroup(cg, 'pull', remote.url(),
1370 result = self.addchangegroup(cg, 'pull', remote.url(),
1371 lock=lock)
1371 lock=lock)
1372 finally:
1372 finally:
1373 lock.release()
1373 lock.release()
1374
1374
1375 return result
1375 return result
1376
1376
1377 def checkpush(self, force, revs):
1377 def checkpush(self, force, revs):
1378 """Extensions can override this function if additional checks have
1378 """Extensions can override this function if additional checks have
1379 to be performed before pushing, or call it if they override the
1379 to be performed before pushing, or call it if they override the
1380 push command.
1380 push command.
1381 """
1381 """
1382 pass
1382 pass
1383
1383
1384 def push(self, remote, force=False, revs=None, newbranch=False):
1384 def push(self, remote, force=False, revs=None, newbranch=False):
1385 '''Push outgoing changesets (limited by revs) from the current
1385 '''Push outgoing changesets (limited by revs) from the current
1386 repository to remote. Return an integer:
1386 repository to remote. Return an integer:
1387 - 0 means HTTP error *or* nothing to push
1387 - 0 means HTTP error *or* nothing to push
1388 - 1 means we pushed and remote head count is unchanged *or*
1388 - 1 means we pushed and remote head count is unchanged *or*
1389 we have outgoing changesets but refused to push
1389 we have outgoing changesets but refused to push
1390 - other values as described by addchangegroup()
1390 - other values as described by addchangegroup()
1391 '''
1391 '''
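# illustrative reading of the result (a sketch, not original code):
#   ret = repo.push(remote)
#   # ret == 0: HTTP error or nothing to push
#   # ret == 1: pushed with head count unchanged, or refused
#   # other values: head count change, per addchangegroup()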
1392 # there are two ways to push to remote repo:
1392 # there are two ways to push to remote repo:
1393 #
1393 #
1394 # addchangegroup assumes local user can lock remote
1394 # addchangegroup assumes local user can lock remote
1395 # repo (local filesystem, old ssh servers).
1395 # repo (local filesystem, old ssh servers).
1396 #
1396 #
1397 # unbundle assumes local user cannot lock remote repo (new ssh
1397 # unbundle assumes local user cannot lock remote repo (new ssh
1398 # servers, http servers).
1398 # servers, http servers).
1399
1399
1400 self.checkpush(force, revs)
1400 self.checkpush(force, revs)
1401 lock = None
1401 lock = None
1402 unbundle = remote.capable('unbundle')
1402 unbundle = remote.capable('unbundle')
1403 if not unbundle:
1403 if not unbundle:
1404 lock = remote.lock()
1404 lock = remote.lock()
1405 try:
1405 try:
1406 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1406 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1407 newbranch)
1407 newbranch)
1408 ret = remote_heads
1408 ret = remote_heads
1409 if cg is not None:
1409 if cg is not None:
1410 if unbundle:
1410 if unbundle:
1411 # local repo finds heads on server, finds out what
1411 # local repo finds heads on server, finds out what
1412 # revs it must push. Once revs transferred, if server
1412 # revs it must push. Once revs transferred, if server
1413 # finds it has different heads (someone else won
1413 # finds it has different heads (someone else won
1414 # commit/push race), server aborts.
1414 # commit/push race), server aborts.
1415 if force:
1415 if force:
1416 remote_heads = ['force']
1416 remote_heads = ['force']
1417 # ssh: return remote's addchangegroup()
1417 # ssh: return remote's addchangegroup()
1418 # http: return remote's addchangegroup() or 0 for error
1418 # http: return remote's addchangegroup() or 0 for error
1419 ret = remote.unbundle(cg, remote_heads, 'push')
1419 ret = remote.unbundle(cg, remote_heads, 'push')
1420 else:
1420 else:
1421 # we return an integer indicating remote head count change
1421 # we return an integer indicating remote head count change
1422 ret = remote.addchangegroup(cg, 'push', self.url(),
1422 ret = remote.addchangegroup(cg, 'push', self.url(),
1423 lock=lock)
1423 lock=lock)
1424 finally:
1424 finally:
1425 if lock is not None:
1425 if lock is not None:
1426 lock.release()
1426 lock.release()
1427
1427
1428 self.ui.debug("checking for updated bookmarks\n")
1428 self.ui.debug("checking for updated bookmarks\n")
1429 rb = remote.listkeys('bookmarks')
1429 rb = remote.listkeys('bookmarks')
1430 for k in rb.keys():
1430 for k in rb.keys():
1431 if k in self._bookmarks:
1431 if k in self._bookmarks:
1432 nr, nl = rb[k], hex(self._bookmarks[k])
1432 nr, nl = rb[k], hex(self._bookmarks[k])
1433 if nr in self:
1433 if nr in self:
1434 cr = self[nr]
1434 cr = self[nr]
1435 cl = self[nl]
1435 cl = self[nl]
1436 if cl in cr.descendants():
1436 if cl in cr.descendants():
1437 r = remote.pushkey('bookmarks', k, nr, nl)
1437 r = remote.pushkey('bookmarks', k, nr, nl)
1438 if r:
1438 if r:
1439 self.ui.status(_("updating bookmark %s\n") % k)
1439 self.ui.status(_("updating bookmark %s\n") % k)
1440 else:
1440 else:
1441 self.ui.warn(_('updating bookmark %s'
1441 self.ui.warn(_('updating bookmark %s'
1442 ' failed!\n') % k)
1442 ' failed!\n') % k)
1443
1443
1444 return ret
1444 return ret
1445
1445
1446 def changegroupinfo(self, nodes, source):
1446 def changegroupinfo(self, nodes, source):
1447 if self.ui.verbose or source == 'bundle':
1447 if self.ui.verbose or source == 'bundle':
1448 self.ui.status(_("%d changesets found\n") % len(nodes))
1448 self.ui.status(_("%d changesets found\n") % len(nodes))
1449 if self.ui.debugflag:
1449 if self.ui.debugflag:
1450 self.ui.debug("list of changesets:\n")
1450 self.ui.debug("list of changesets:\n")
1451 for node in nodes:
1451 for node in nodes:
1452 self.ui.debug("%s\n" % hex(node))
1452 self.ui.debug("%s\n" % hex(node))
1453
1453
1454 def changegroupsubset(self, bases, heads, source):
1454 def changegroupsubset(self, bases, heads, source):
1455 """Compute a changegroup consisting of all the nodes that are
1455 """Compute a changegroup consisting of all the nodes that are
1456 descendants of any of the bases and ancestors of any of the heads.
1456 descendants of any of the bases and ancestors of any of the heads.
1457 Return a chunkbuffer object whose read() method will return
1457 Return a chunkbuffer object whose read() method will return
1458 successive changegroup chunks.
1458 successive changegroup chunks.
1459
1459
1460 It is fairly complex, as determining which filenodes and which
1460 It is fairly complex, as determining which filenodes and which
1461 manifest nodes need to be included for the changesets to be complete
1461 manifest nodes need to be included for the changesets to be complete
1462 is non-trivial.
1462 is non-trivial.
1463
1463
1464 Another wrinkle is doing the reverse, figuring out which changeset in
1464 Another wrinkle is doing the reverse, figuring out which changeset in
1465 the changegroup a particular filenode or manifestnode belongs to.
1465 the changegroup a particular filenode or manifestnode belongs to.
1466 """
1466 """
1467 cl = self.changelog
1467 cl = self.changelog
1468 if not bases:
1468 if not bases:
1469 bases = [nullid]
1469 bases = [nullid]
1470 csets, bases, heads = cl.nodesbetween(bases, heads)
1470 csets, bases, heads = cl.nodesbetween(bases, heads)
1471 # We assume that all ancestors of bases are known
1471 # We assume that all ancestors of bases are known
1472 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1472 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1473 return self._changegroupsubset(common, csets, heads, source)
1473 return self._changegroupsubset(common, csets, heads, source)
1474
1474
1475 def getbundle(self, source, heads=None, common=None):
1475 def getbundle(self, source, heads=None, common=None):
1476 """Like changegroupsubset, but returns the set difference between the
1476 """Like changegroupsubset, but returns the set difference between the
1477 ancestors of heads and the ancestors of common.
1477 ancestors of heads and the ancestors of common.
1478
1478
1479 If heads is None, use the local heads. If common is None, use [nullid].
1479 If heads is None, use the local heads. If common is None, use [nullid].
1480
1480
1481 The nodes in common might not all be known locally due to the way the
1481 The nodes in common might not all be known locally due to the way the
1482 current discovery protocol works.
1482 current discovery protocol works.
1483 """
1483 """
1484 cl = self.changelog
1484 cl = self.changelog
1485 if common:
1485 if common:
1486 nm = cl.nodemap
1486 nm = cl.nodemap
1487 common = [n for n in common if n in nm]
1487 common = [n for n in common if n in nm]
1488 else:
1488 else:
1489 common = [nullid]
1489 common = [nullid]
1490 if not heads:
1490 if not heads:
1491 heads = cl.heads()
1491 heads = cl.heads()
1492 common, missing = cl.findcommonmissing(common, heads)
1492 common, missing = cl.findcommonmissing(common, heads)
1493 if not missing:
1493 if not missing:
1494 return None
1494 return None
1495 return self._changegroupsubset(common, missing, heads, source)
1495 return self._changegroupsubset(common, missing, heads, source)
1496
1496
1497 def _changegroupsubset(self, commonrevs, csets, heads, source):
1497 def _changegroupsubset(self, commonrevs, csets, heads, source):
1498
1498
1499 cl = self.changelog
1499 cl = self.changelog
1500 mf = self.manifest
1500 mf = self.manifest
1501 mfs = {} # needed manifests
1501 mfs = {} # needed manifests
1502 fnodes = {} # needed file nodes
1502 fnodes = {} # needed file nodes
1503 changedfiles = set()
1503 changedfiles = set()
1504 fstate = ['', {}]
1504 fstate = ['', {}]
1505 count = [0]
1505 count = [0]
1506
1506
1507 # can we go through the fast path?
1507 # can we go through the fast path?
1508 heads.sort()
1508 heads.sort()
1509 if heads == sorted(self.heads()):
1509 if heads == sorted(self.heads()):
1510 return self._changegroup(csets, source)
1510 return self._changegroup(csets, source)
1511
1511
1512 # slow path
1512 # slow path
1513 self.hook('preoutgoing', throw=True, source=source)
1513 self.hook('preoutgoing', throw=True, source=source)
1514 self.changegroupinfo(csets, source)
1514 self.changegroupinfo(csets, source)
1515
1515
1516 # filter any nodes that claim to be part of the known set
1516 # filter any nodes that claim to be part of the known set
1517 def prune(revlog, missing):
1517 def prune(revlog, missing):
1518 for n in missing:
1518 for n in missing:
1519 if revlog.linkrev(revlog.rev(n)) not in commonrevs:
1519 if revlog.linkrev(revlog.rev(n)) not in commonrevs:
1520 yield n
1520 yield n
1521
1521
1522 def lookup(revlog, x):
1522 def lookup(revlog, x):
1523 if revlog == cl:
1523 if revlog == cl:
1524 c = cl.read(x)
1524 c = cl.read(x)
1525 changedfiles.update(c[3])
1525 changedfiles.update(c[3])
1526 mfs.setdefault(c[0], x)
1526 mfs.setdefault(c[0], x)
1527 count[0] += 1
1527 count[0] += 1
1528 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1528 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1529 return x
1529 return x
1530 elif revlog == mf:
1530 elif revlog == mf:
1531 clnode = mfs[x]
1531 clnode = mfs[x]
1532 mdata = mf.readfast(x)
1532 mdata = mf.readfast(x)
1533 for f in changedfiles:
1533 for f in changedfiles:
1534 if f in mdata:
1534 if f in mdata:
1535 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1535 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1536 count[0] += 1
1536 count[0] += 1
1537 self.ui.progress(_('bundling'), count[0],
1537 self.ui.progress(_('bundling'), count[0],
1538 unit=_('manifests'), total=len(mfs))
1538 unit=_('manifests'), total=len(mfs))
1539 return mfs[x]
1539 return mfs[x]
1540 else:
1540 else:
1541 self.ui.progress(
1541 self.ui.progress(
1542 _('bundling'), count[0], item=fstate[0],
1542 _('bundling'), count[0], item=fstate[0],
1543 unit=_('files'), total=len(changedfiles))
1543 unit=_('files'), total=len(changedfiles))
1544 return fstate[1][x]
1544 return fstate[1][x]
1545
1545
1546 bundler = changegroup.bundle10(lookup)
1546 bundler = changegroup.bundle10(lookup)
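# resolve the bundle.reorder knob before generating any groups:
# 'auto' becomes None, leaving the reordering decision to each
# revlog (generaldelta revlogs are the ones that benefit from a
# linearized changegroup, per this change's description); any other
# value is parsed as a boolean and forces the behaviour on or off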
1547 reorder = self.ui.config('bundle', 'reorder', 'auto')
1548 if reorder == 'auto':
1549 reorder = None
1550 else:
1551 reorder = util.parsebool(reorder)
1547
1552
1548 def gengroup():
1553 def gengroup():
1549 # Create a changenode group generator that will call our functions
1554 # Create a changenode group generator that will call our functions
1550 # back to lookup the owning changenode and collect information.
1555 # back to lookup the owning changenode and collect information.
1551 for chunk in cl.group(csets, bundler):
1556 for chunk in cl.group(csets, bundler, reorder=reorder):
1552 yield chunk
1557 yield chunk
1553 self.ui.progress(_('bundling'), None)
1558 self.ui.progress(_('bundling'), None)
1554
1559
1555 # Create a generator for the manifestnodes that calls our lookup
1560 # Create a generator for the manifestnodes that calls our lookup
1556 # and data collection functions back.
1561 # and data collection functions back.
1557 count[0] = 0
1562 count[0] = 0
1558 for chunk in mf.group(prune(mf, mfs), bundler):
1563 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1559 yield chunk
1564 yield chunk
1560 self.ui.progress(_('bundling'), None)
1565 self.ui.progress(_('bundling'), None)
1561
1566
1562 mfs.clear()
1567 mfs.clear()
1563
1568
1564 # Go through all our files in order sorted by name.
1569 # Go through all our files in order sorted by name.
1565 count[0] = 0
1570 count[0] = 0
1566 for fname in sorted(changedfiles):
1571 for fname in sorted(changedfiles):
1567 filerevlog = self.file(fname)
1572 filerevlog = self.file(fname)
1568 if not len(filerevlog):
1573 if not len(filerevlog):
1569 raise util.Abort(_("empty or missing revlog for %s") % fname)
1574 raise util.Abort(_("empty or missing revlog for %s") % fname)
1570 fstate[0] = fname
1575 fstate[0] = fname
1571 fstate[1] = fnodes.pop(fname, {})
1576 fstate[1] = fnodes.pop(fname, {})
1572 first = True
1577 first = True
1573
1578
1574 for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
1579 for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
1575 bundler):
1580 bundler, reorder=reorder):
1576 if first:
1581 if first:
1577 if chunk == bundler.close():
1582 if chunk == bundler.close():
1578 break
1583 break
1579 count[0] += 1
1584 count[0] += 1
1580 yield bundler.fileheader(fname)
1585 yield bundler.fileheader(fname)
1581 first = False
1586 first = False
1582 yield chunk
1587 yield chunk
1583 # Signal that no more groups are left.
1588 # Signal that no more groups are left.
1584 yield bundler.close()
1589 yield bundler.close()
1585 self.ui.progress(_('bundling'), None)
1590 self.ui.progress(_('bundling'), None)
1586
1591
1587 if csets:
1592 if csets:
1588 self.hook('outgoing', node=hex(csets[0]), source=source)
1593 self.hook('outgoing', node=hex(csets[0]), source=source)
1589
1594
1590 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1595 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1591
1596
1592 def changegroup(self, basenodes, source):
1597 def changegroup(self, basenodes, source):
1593 # to avoid a race we use changegroupsubset() (issue1320)
1598 # to avoid a race we use changegroupsubset() (issue1320)
1594 return self.changegroupsubset(basenodes, self.heads(), source)
1599 return self.changegroupsubset(basenodes, self.heads(), source)
1595
1600
1596 def _changegroup(self, nodes, source):
1601 def _changegroup(self, nodes, source):
1597 """Compute the changegroup of all nodes that we have that a recipient
1602 """Compute the changegroup of all nodes that we have that a recipient
1598 doesn't. Return a chunkbuffer object whose read() method will return
1603 doesn't. Return a chunkbuffer object whose read() method will return
1599 successive changegroup chunks.
1604 successive changegroup chunks.
1600
1605
1601 This is much easier than the previous function as we can assume that
1606 This is much easier than the previous function as we can assume that
1602 the recipient has any changenode we aren't sending them.
1607 the recipient has any changenode we aren't sending them.
1603
1608
1604 nodes is the set of nodes to send"""
1609 nodes is the set of nodes to send"""
1605
1610
1606 cl = self.changelog
1611 cl = self.changelog
1607 mf = self.manifest
1612 mf = self.manifest
1608 mfs = {}
1613 mfs = {}
1609 changedfiles = set()
1614 changedfiles = set()
1610 fstate = ['']
1615 fstate = ['']
1611 count = [0]
1616 count = [0]
1612
1617
1613 self.hook('preoutgoing', throw=True, source=source)
1618 self.hook('preoutgoing', throw=True, source=source)
1614 self.changegroupinfo(nodes, source)
1619 self.changegroupinfo(nodes, source)
1615
1620
1616 revset = set([cl.rev(n) for n in nodes])
1621 revset = set([cl.rev(n) for n in nodes])
1617
1622
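# yield the nodes of the given revlog whose linkrev falls inside
# the outgoing revset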
1618 def gennodelst(log):
1623 def gennodelst(log):
1619 for r in log:
1624 for r in log:
1620 if log.linkrev(r) in revset:
1625 if log.linkrev(r) in revset:
1621 yield log.node(r)
1626 yield log.node(r)
1622
1627
1623 def lookup(revlog, x):
1628 def lookup(revlog, x):
1624 if revlog == cl:
1629 if revlog == cl:
1625 c = cl.read(x)
1630 c = cl.read(x)
1626 changedfiles.update(c[3])
1631 changedfiles.update(c[3])
1627 mfs.setdefault(c[0], x)
1632 mfs.setdefault(c[0], x)
1628 count[0] += 1
1633 count[0] += 1
1629 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1634 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1630 return x
1635 return x
1631 elif revlog == mf:
1636 elif revlog == mf:
1632 count[0] += 1
1637 count[0] += 1
1633 self.ui.progress(_('bundling'), count[0],
1638 self.ui.progress(_('bundling'), count[0],
1634 unit=_('manifests'), total=len(mfs))
1639 unit=_('manifests'), total=len(mfs))
1635 return cl.node(revlog.linkrev(revlog.rev(x)))
1640 return cl.node(revlog.linkrev(revlog.rev(x)))
1636 else:
1641 else:
1637 self.ui.progress(
1642 self.ui.progress(
1638 _('bundling'), count[0], item=fstate[0],
1643 _('bundling'), count[0], item=fstate[0],
1639 total=len(changedfiles), unit=_('files'))
1644 total=len(changedfiles), unit=_('files'))
1640 return cl.node(revlog.linkrev(revlog.rev(x)))
1645 return cl.node(revlog.linkrev(revlog.rev(x)))
1641
1646
1642 bundler = changegroup.bundle10(lookup)
1647 bundler = changegroup.bundle10(lookup)
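# same bundle.reorder resolution as in _changegroupsubset() above:
# 'auto' -> None (let the revlog decide), anything else a forced
# boolean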
1648 reorder = self.ui.config('bundle', 'reorder', 'auto')
1649 if reorder == 'auto':
1650 reorder = None
1651 else:
1652 reorder = util.parsebool(reorder)
1643
1653
1644 def gengroup():
1654 def gengroup():
1645 '''yield a sequence of changegroup chunks (strings)'''
1655 '''yield a sequence of changegroup chunks (strings)'''
1646 # construct a list of all changed files
1656 # construct a list of all changed files
1647
1657
1648 for chunk in cl.group(nodes, bundler):
1658 for chunk in cl.group(nodes, bundler, reorder=reorder):
1649 yield chunk
1659 yield chunk
1650 self.ui.progress(_('bundling'), None)
1660 self.ui.progress(_('bundling'), None)
1651
1661
1652 count[0] = 0
1662 count[0] = 0
1653 for chunk in mf.group(gennodelst(mf), bundler):
1663 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1654 yield chunk
1664 yield chunk
1655 self.ui.progress(_('bundling'), None)
1665 self.ui.progress(_('bundling'), None)
1656
1666
1657 count[0] = 0
1667 count[0] = 0
1658 for fname in sorted(changedfiles):
1668 for fname in sorted(changedfiles):
1659 filerevlog = self.file(fname)
1669 filerevlog = self.file(fname)
1660 if not len(filerevlog):
1670 if not len(filerevlog):
1661 raise util.Abort(_("empty or missing revlog for %s") % fname)
1671 raise util.Abort(_("empty or missing revlog for %s") % fname)
1662 fstate[0] = fname
1672 fstate[0] = fname
1663 first = True
1673 first = True
1664 for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
1674 for chunk in filerevlog.group(gennodelst(filerevlog), bundler,
1675 reorder=reorder):
1665 if first:
1676 if first:
1666 if chunk == bundler.close():
1677 if chunk == bundler.close():
1667 break
1678 break
1668 count[0] += 1
1679 count[0] += 1
1669 yield bundler.fileheader(fname)
1680 yield bundler.fileheader(fname)
1670 first = False
1681 first = False
1671 yield chunk
1682 yield chunk
1672 yield bundler.close()
1683 yield bundler.close()
1673 self.ui.progress(_('bundling'), None)
1684 self.ui.progress(_('bundling'), None)
1674
1685
1675 if nodes:
1686 if nodes:
1676 self.hook('outgoing', node=hex(nodes[0]), source=source)
1687 self.hook('outgoing', node=hex(nodes[0]), source=source)
1677
1688
1678 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1689 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1679
1690
1680 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1691 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1681 """Add the changegroup returned by source.read() to this repo.
1692 """Add the changegroup returned by source.read() to this repo.
1682 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1693 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1683 the URL of the repo where this changegroup is coming from.
1694 the URL of the repo where this changegroup is coming from.
1684 If lock is not None, the function takes ownership of the lock
1695 If lock is not None, the function takes ownership of the lock
1685 and releases it after the changegroup is added.
1696 and releases it after the changegroup is added.
1686
1697
1687 Return an integer summarizing the change to this repo:
1698 Return an integer summarizing the change to this repo:
1688 - nothing changed or no source: 0
1699 - nothing changed or no source: 0
1689 - more heads than before: 1+added heads (2..n)
1700 - more heads than before: 1+added heads (2..n)
1690 - fewer heads than before: -1-removed heads (-2..-n)
1701 - fewer heads than before: -1-removed heads (-2..-n)
1691 - number of heads stays the same: 1
1702 - number of heads stays the same: 1
1692 """
1703 """
1693 def csmap(x):
1704 def csmap(x):
1694 self.ui.debug("add changeset %s\n" % short(x))
1705 self.ui.debug("add changeset %s\n" % short(x))
1695 return len(cl)
1706 return len(cl)
1696
1707
1697 def revmap(x):
1708 def revmap(x):
1698 return cl.rev(x)
1709 return cl.rev(x)
1699
1710
1700 if not source:
1711 if not source:
1701 return 0
1712 return 0
1702
1713
1703 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1714 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1704
1715
1705 changesets = files = revisions = 0
1716 changesets = files = revisions = 0
1706 efiles = set()
1717 efiles = set()
1707
1718
1708 # write changelog data to temp files so concurrent readers will not see
1719 # write changelog data to temp files so concurrent readers will not see
1709 # an inconsistent view
1709 # an inconsistent view
1710 cl = self.changelog
1721 cl = self.changelog
1711 cl.delayupdate()
1722 cl.delayupdate()
1712 oldheads = cl.heads()
1723 oldheads = cl.heads()
1713
1724
1714 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1725 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1715 try:
1726 try:
1716 trp = weakref.proxy(tr)
1727 trp = weakref.proxy(tr)
1717 # pull off the changeset group
1728 # pull off the changeset group
1718 self.ui.status(_("adding changesets\n"))
1729 self.ui.status(_("adding changesets\n"))
1719 clstart = len(cl)
1730 clstart = len(cl)
1720 class prog(object):
1731 class prog(object):
1721 step = _('changesets')
1732 step = _('changesets')
1722 count = 1
1733 count = 1
1723 ui = self.ui
1734 ui = self.ui
1724 total = None
1735 total = None
1725 def __call__(self):
1736 def __call__(self):
1726 self.ui.progress(self.step, self.count, unit=_('chunks'),
1737 self.ui.progress(self.step, self.count, unit=_('chunks'),
1727 total=self.total)
1738 total=self.total)
1728 self.count += 1
1739 self.count += 1
1729 pr = prog()
1740 pr = prog()
1730 source.callback = pr
1741 source.callback = pr
1731
1742
1732 source.changelogheader()
1743 source.changelogheader()
1733 if (cl.addgroup(source, csmap, trp) is None
1744 if (cl.addgroup(source, csmap, trp) is None
1734 and not emptyok):
1745 and not emptyok):
1735 raise util.Abort(_("received changelog group is empty"))
1746 raise util.Abort(_("received changelog group is empty"))
1736 clend = len(cl)
1747 clend = len(cl)
1737 changesets = clend - clstart
1748 changesets = clend - clstart
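# count the distinct files touched by the incoming changesets; only
# the total is kept, to size the file progress bar below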
1738 for c in xrange(clstart, clend):
1749 for c in xrange(clstart, clend):
1739 efiles.update(self[c].files())
1750 efiles.update(self[c].files())
1740 efiles = len(efiles)
1751 efiles = len(efiles)
1741 self.ui.progress(_('changesets'), None)
1752 self.ui.progress(_('changesets'), None)
1742
1753
1743 # pull off the manifest group
1754 # pull off the manifest group
1744 self.ui.status(_("adding manifests\n"))
1755 self.ui.status(_("adding manifests\n"))
1745 pr.step = _('manifests')
1756 pr.step = _('manifests')
1746 pr.count = 1
1757 pr.count = 1
1747 pr.total = changesets # manifests <= changesets
1758 pr.total = changesets # manifests <= changesets
1748 # no need to check for empty manifest group here:
1759 # no need to check for empty manifest group here:
1749 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1760 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1750 # no new manifest will be created and the manifest group will
1761 # no new manifest will be created and the manifest group will
1751 # be empty during the pull
1762 # be empty during the pull
1752 source.manifestheader()
1763 source.manifestheader()
1753 self.manifest.addgroup(source, revmap, trp)
1764 self.manifest.addgroup(source, revmap, trp)
1754 self.ui.progress(_('manifests'), None)
1765 self.ui.progress(_('manifests'), None)
1755
1766
1756 needfiles = {}
1767 needfiles = {}
1757 if self.ui.configbool('server', 'validate', default=False):
1768 if self.ui.configbool('server', 'validate', default=False):
1758 # validate incoming csets have their manifests
1769 # validate incoming csets have their manifests
1759 for cset in xrange(clstart, clend):
1770 for cset in xrange(clstart, clend):
1760 mfest = self.changelog.read(self.changelog.node(cset))[0]
1771 mfest = self.changelog.read(self.changelog.node(cset))[0]
1761 mfest = self.manifest.readdelta(mfest)
1772 mfest = self.manifest.readdelta(mfest)
1762 # store file nodes we must see
1773 # store file nodes we must see
1763 for f, n in mfest.iteritems():
1774 for f, n in mfest.iteritems():
1764 needfiles.setdefault(f, set()).add(n)
1775 needfiles.setdefault(f, set()).add(n)
1765
1776
1766 # process the files
1777 # process the files
1767 self.ui.status(_("adding file changes\n"))
1778 self.ui.status(_("adding file changes\n"))
1768 pr.step = 'files'
1779 pr.step = 'files'
1769 pr.count = 1
1780 pr.count = 1
1770 pr.total = efiles
1781 pr.total = efiles
1771 source.callback = None
1782 source.callback = None
1772
1783
1773 while 1:
1784 while 1:
1774 chunkdata = source.filelogheader()
1785 chunkdata = source.filelogheader()
1775 if not chunkdata:
1786 if not chunkdata:
1776 break
1787 break
1777 f = chunkdata["filename"]
1788 f = chunkdata["filename"]
1778 self.ui.debug("adding %s revisions\n" % f)
1789 self.ui.debug("adding %s revisions\n" % f)
1779 pr()
1790 pr()
1780 fl = self.file(f)
1791 fl = self.file(f)
1781 o = len(fl)
1792 o = len(fl)
1782 if fl.addgroup(source, revmap, trp) is None:
1793 if fl.addgroup(source, revmap, trp) is None:
1783 raise util.Abort(_("received file revlog group is empty"))
1794 raise util.Abort(_("received file revlog group is empty"))
1784 revisions += len(fl) - o
1795 revisions += len(fl) - o
1785 files += 1
1796 files += 1
1786 if f in needfiles:
1797 if f in needfiles:
1787 needs = needfiles[f]
1798 needs = needfiles[f]
1788 for new in xrange(o, len(fl)):
1799 for new in xrange(o, len(fl)):
1789 n = fl.node(new)
1800 n = fl.node(new)
1790 if n in needs:
1801 if n in needs:
1791 needs.remove(n)
1802 needs.remove(n)
1792 if not needs:
1803 if not needs:
1793 del needfiles[f]
1804 del needfiles[f]
1794 self.ui.progress(_('files'), None)
1805 self.ui.progress(_('files'), None)
1795
1806
1796 for f, needs in needfiles.iteritems():
1807 for f, needs in needfiles.iteritems():
1797 fl = self.file(f)
1808 fl = self.file(f)
1798 for n in needs:
1809 for n in needs:
1799 try:
1810 try:
1800 fl.rev(n)
1811 fl.rev(n)
1801 except error.LookupError:
1812 except error.LookupError:
1802 raise util.Abort(
1813 raise util.Abort(
1803 _('missing file data for %s:%s - run hg verify') %
1814 _('missing file data for %s:%s - run hg verify') %
1804 (f, hex(n)))
1815 (f, hex(n)))
1805
1816
1806 dh = 0
1817 dh = 0
1807 if oldheads:
1818 if oldheads:
1808 heads = cl.heads()
1819 heads = cl.heads()
1809 dh = len(heads) - len(oldheads)
1820 dh = len(heads) - len(oldheads)
1810 for h in heads:
1821 for h in heads:
1811 if h not in oldheads and 'close' in self[h].extra():
1822 if h not in oldheads and 'close' in self[h].extra():
1812 dh -= 1
1823 dh -= 1
1813 htext = ""
1824 htext = ""
1814 if dh:
1825 if dh:
1815 htext = _(" (%+d heads)") % dh
1826 htext = _(" (%+d heads)") % dh
1816
1827
1817 self.ui.status(_("added %d changesets"
1828 self.ui.status(_("added %d changesets"
1818 " with %d changes to %d files%s\n")
1829 " with %d changes to %d files%s\n")
1819 % (changesets, revisions, files, htext))
1830 % (changesets, revisions, files, htext))
1820
1831
1821 if changesets > 0:
1832 if changesets > 0:
1822 p = lambda: cl.writepending() and self.root or ""
1833 p = lambda: cl.writepending() and self.root or ""
1823 self.hook('pretxnchangegroup', throw=True,
1834 self.hook('pretxnchangegroup', throw=True,
1824 node=hex(cl.node(clstart)), source=srctype,
1835 node=hex(cl.node(clstart)), source=srctype,
1825 url=url, pending=p)
1836 url=url, pending=p)
1826
1837
1827 # make changelog see real files again
1838 # make changelog see real files again
1828 cl.finalize(trp)
1839 cl.finalize(trp)
1829
1840
1830 tr.close()
1841 tr.close()
1831 finally:
1842 finally:
1832 tr.release()
1843 tr.release()
1833 if lock:
1844 if lock:
1834 lock.release()
1845 lock.release()
1835
1846
1836 if changesets > 0:
1847 if changesets > 0:
1837 # forcefully update the on-disk branch cache
1848 # forcefully update the on-disk branch cache
1838 self.ui.debug("updating the branch cache\n")
1849 self.ui.debug("updating the branch cache\n")
1839 self.updatebranchcache()
1850 self.updatebranchcache()
1840 self.hook("changegroup", node=hex(cl.node(clstart)),
1851 self.hook("changegroup", node=hex(cl.node(clstart)),
1841 source=srctype, url=url)
1852 source=srctype, url=url)
1842
1853
1843 for i in xrange(clstart, clend):
1854 for i in xrange(clstart, clend):
1844 self.hook("incoming", node=hex(cl.node(i)),
1855 self.hook("incoming", node=hex(cl.node(i)),
1845 source=srctype, url=url)
1856 source=srctype, url=url)
1846
1857
1847 # never return 0 here:
1858 # never return 0 here:
1848 if dh < 0:
1859 if dh < 0:
1849 return dh - 1
1860 return dh - 1
1850 else:
1861 else:
1851 return dh + 1
1862 return dh + 1
1852
1863
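# A minimal sketch (not part of this changeset) of inverting the
# "never return 0" encoding used above: the return value is dh + 1 when
# dh >= 0 and dh - 1 otherwise. decodeheads is an illustrative helper,
# not Mercurial API:
def decodeheads(ret):
    if ret > 0:
        return ret - 1   # ret = dh + 1 for dh >= 0
    return ret + 1       # ret = dh - 1 for dh < 0

assert decodeheads(1) == 0    # no change in head count
assert decodeheads(3) == 2    # two new heads
assert decodeheads(-2) == -1  # one head closed or merged away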
1853 def stream_in(self, remote, requirements):
1864 def stream_in(self, remote, requirements):
1854 lock = self.lock()
1865 lock = self.lock()
1855 try:
1866 try:
1856 fp = remote.stream_out()
1867 fp = remote.stream_out()
1857 l = fp.readline()
1868 l = fp.readline()
1858 try:
1869 try:
1859 resp = int(l)
1870 resp = int(l)
1860 except ValueError:
1871 except ValueError:
1861 raise error.ResponseError(
1872 raise error.ResponseError(
1862 _('Unexpected response from remote server:'), l)
1873 _('Unexpected response from remote server:'), l)
1863 if resp == 1:
1874 if resp == 1:
1864 raise util.Abort(_('operation forbidden by server'))
1875 raise util.Abort(_('operation forbidden by server'))
1865 elif resp == 2:
1876 elif resp == 2:
1866 raise util.Abort(_('locking the remote repository failed'))
1877 raise util.Abort(_('locking the remote repository failed'))
1867 elif resp != 0:
1878 elif resp != 0:
1868 raise util.Abort(_('the server sent an unknown error code'))
1879 raise util.Abort(_('the server sent an unknown error code'))
1869 self.ui.status(_('streaming all changes\n'))
1880 self.ui.status(_('streaming all changes\n'))
1870 l = fp.readline()
1881 l = fp.readline()
1871 try:
1882 try:
1872 total_files, total_bytes = map(int, l.split(' ', 1))
1883 total_files, total_bytes = map(int, l.split(' ', 1))
1873 except (ValueError, TypeError):
1884 except (ValueError, TypeError):
1874 raise error.ResponseError(
1885 raise error.ResponseError(
1875 _('Unexpected response from remote server:'), l)
1886 _('Unexpected response from remote server:'), l)
1876 self.ui.status(_('%d files to transfer, %s of data\n') %
1887 self.ui.status(_('%d files to transfer, %s of data\n') %
1877 (total_files, util.bytecount(total_bytes)))
1888 (total_files, util.bytecount(total_bytes)))
1878 start = time.time()
1889 start = time.time()
1879 for i in xrange(total_files):
1890 for i in xrange(total_files):
1880 # XXX doesn't support '\n' or '\r' in filenames
1891 # XXX doesn't support '\n' or '\r' in filenames
1881 l = fp.readline()
1892 l = fp.readline()
1882 try:
1893 try:
1883 name, size = l.split('\0', 1)
1894 name, size = l.split('\0', 1)
1884 size = int(size)
1895 size = int(size)
1885 except (ValueError, TypeError):
1896 except (ValueError, TypeError):
1886 raise error.ResponseError(
1897 raise error.ResponseError(
1887 _('Unexpected response from remote server:'), l)
1898 _('Unexpected response from remote server:'), l)
1888 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1899 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1889 # for backwards compat, name was partially encoded
1900 # for backwards compat, name was partially encoded
1890 ofp = self.sopener(store.decodedir(name), 'w')
1901 ofp = self.sopener(store.decodedir(name), 'w')
1891 for chunk in util.filechunkiter(fp, limit=size):
1902 for chunk in util.filechunkiter(fp, limit=size):
1892 ofp.write(chunk)
1903 ofp.write(chunk)
1893 ofp.close()
1904 ofp.close()
1894 elapsed = time.time() - start
1905 elapsed = time.time() - start
1895 if elapsed <= 0:
1906 if elapsed <= 0:
1896 elapsed = 0.001
1907 elapsed = 0.001
1897 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1908 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1898 (util.bytecount(total_bytes), elapsed,
1909 (util.bytecount(total_bytes), elapsed,
1899 util.bytecount(total_bytes / elapsed)))
1910 util.bytecount(total_bytes / elapsed)))
1900
1911
1901 # new requirements = old non-format requirements + new format-related
1912 # new requirements = old non-format requirements + new format-related
1902 # requirements from the streamed-in repository
1913 # requirements from the streamed-in repository
1903 requirements.update(set(self.requirements) - self.supportedformats)
1914 requirements.update(set(self.requirements) - self.supportedformats)
1904 self._applyrequirements(requirements)
1915 self._applyrequirements(requirements)
1905 self._writerequirements()
1916 self._writerequirements()
1906
1917
1907 self.invalidate()
1918 self.invalidate()
1908 return len(self.heads()) + 1
1919 return len(self.heads()) + 1
1909 finally:
1920 finally:
1910 lock.release()
1921 lock.release()
1911
1922
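# The stream_out payload parsed above is line-oriented: a status integer,
# then "<files> <bytes>", then per file a "<name>\0<size>" header followed
# by exactly <size> raw bytes. A minimal standalone reader under those
# assumptions -- read_stream and dest are illustrative, and the store-name
# decoding done by stream_in is deliberately skipped:
import os

def read_stream(fp, dest):
    status = int(fp.readline())   # 0 ok, 1 forbidden, 2 remote lock failed
    if status != 0:
        raise ValueError('server refused stream: %d' % status)
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for _i in xrange(total_files):
        name, size = fp.readline().split('\0', 1)
        size = int(size)
        path = os.path.join(dest, name)
        d = os.path.dirname(path)
        if d and not os.path.isdir(d):
            os.makedirs(d)
        ofp = open(path, 'wb')
        remaining = size
        while remaining > 0:
            chunk = fp.read(min(65536, remaining))
            if not chunk:
                raise ValueError('premature end of stream')
            ofp.write(chunk)
            remaining -= len(chunk)
        ofp.close()
    return total_files, total_bytes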
1912 def clone(self, remote, heads=[], stream=False):
1923 def clone(self, remote, heads=[], stream=False):
1913 '''clone remote repository.
1924 '''clone remote repository.
1914
1925
1915 keyword arguments:
1926 keyword arguments:
1916 heads: list of revs to clone (forces use of pull)
1927 heads: list of revs to clone (forces use of pull)
1917 stream: use streaming clone if possible'''
1928 stream: use streaming clone if possible'''
1918
1929
1919 # now, all clients that can request uncompressed clones can
1930 # now, all clients that can request uncompressed clones can
1920 # read repo formats supported by all servers that can serve
1931 # read repo formats supported by all servers that can serve
1921 # them.
1932 # them.
1922
1933
1923 # if revlog format changes, client will have to check version
1934 # if revlog format changes, client will have to check version
1924 # and format flags on "stream" capability, and use
1935 # and format flags on "stream" capability, and use
1925 # uncompressed only if compatible.
1936 # uncompressed only if compatible.
1926
1937
1927 if stream and not heads:
1938 if stream and not heads:
1928 # 'stream' means remote revlog format is revlogv1 only
1939 # 'stream' means remote revlog format is revlogv1 only
1929 if remote.capable('stream'):
1940 if remote.capable('stream'):
1930 return self.stream_in(remote, set(('revlogv1',)))
1941 return self.stream_in(remote, set(('revlogv1',)))
1931 # otherwise, 'streamreqs' contains the remote revlog format
1942 # otherwise, 'streamreqs' contains the remote revlog format
1932 streamreqs = remote.capable('streamreqs')
1943 streamreqs = remote.capable('streamreqs')
1933 if streamreqs:
1944 if streamreqs:
1934 streamreqs = set(streamreqs.split(','))
1945 streamreqs = set(streamreqs.split(','))
1935 # if we support it, stream in and adjust our requirements
1946 # if we support it, stream in and adjust our requirements
1936 if not streamreqs - self.supportedformats:
1947 if not streamreqs - self.supportedformats:
1937 return self.stream_in(remote, streamreqs)
1948 return self.stream_in(remote, streamreqs)
1938 return self.pull(remote, heads)
1949 return self.pull(remote, heads)
1939
1950
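# A sketch of the streaming-clone negotiation above reduced to a single
# predicate: it returns the requirement set to stream with, or None to
# fall back to pull. canstreamclone is an illustrative name, not
# Mercurial API:
def canstreamclone(remote, supportedformats, heads):
    if heads:
        return None                       # partial clones must pull
    if remote.capable('stream'):
        return set(('revlogv1',))         # legacy servers: plain revlogv1
    streamreqs = remote.capable('streamreqs')
    if streamreqs:
        reqs = set(streamreqs.split(','))
        if not reqs - supportedformats:   # no requirement we cannot read
            return reqs
    return None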
1940 def pushkey(self, namespace, key, old, new):
1951 def pushkey(self, namespace, key, old, new):
1941 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1952 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1942 old=old, new=new)
1953 old=old, new=new)
1943 ret = pushkey.push(self, namespace, key, old, new)
1954 ret = pushkey.push(self, namespace, key, old, new)
1944 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1955 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1945 ret=ret)
1956 ret=ret)
1946 return ret
1957 return ret
1947
1958
1948 def listkeys(self, namespace):
1959 def listkeys(self, namespace):
1949 self.hook('prelistkeys', throw=True, namespace=namespace)
1960 self.hook('prelistkeys', throw=True, namespace=namespace)
1950 values = pushkey.list(self, namespace)
1961 values = pushkey.list(self, namespace)
1951 self.hook('listkeys', namespace=namespace, values=values)
1962 self.hook('listkeys', namespace=namespace, values=values)
1952 return values
1963 return values
1953
1964
1954 def debugwireargs(self, one, two, three=None, four=None, five=None):
1965 def debugwireargs(self, one, two, three=None, four=None, five=None):
1955 '''used to test argument passing over the wire'''
1966 '''used to test argument passing over the wire'''
1956 return "%s %s %s %s %s" % (one, two, three, four, five)
1967 return "%s %s %s %s %s" % (one, two, three, four, five)
1957
1968
1958 # used to avoid circular references so destructors work
1969 # used to avoid circular references so destructors work
1959 def aftertrans(files):
1970 def aftertrans(files):
1960 renamefiles = [tuple(t) for t in files]
1971 renamefiles = [tuple(t) for t in files]
1961 def a():
1972 def a():
1962 for src, dest in renamefiles:
1973 for src, dest in renamefiles:
1963 util.rename(src, dest)
1974 util.rename(src, dest)
1964 return a
1975 return a
1965
1976
1966 def undoname(fn):
1977 def undoname(fn):
1967 base, name = os.path.split(fn)
1978 base, name = os.path.split(fn)
1968 assert name.startswith('journal')
1979 assert name.startswith('journal')
1969 return os.path.join(base, name.replace('journal', 'undo', 1))
1980 return os.path.join(base, name.replace('journal', 'undo', 1))
1970
1981
1971 def instance(ui, path, create):
1982 def instance(ui, path, create):
1972 return localrepository(ui, util.localpath(path), create)
1983 return localrepository(ui, util.localpath(path), create)
1973
1984
1974 def islocal(path):
1985 def islocal(path):
1975 return True
1986 return True
@@ -1,1273 +1,1280 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 # import stuff from node for others to import from revlog
14 # import stuff from node for others to import from revlog
15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
16 from i18n import _
16 from i18n import _
17 import ancestor, mdiff, parsers, error, util
17 import ancestor, mdiff, parsers, error, util, dagutil
18 import struct, zlib, errno
18 import struct, zlib, errno
19
19
20 _pack = struct.pack
20 _pack = struct.pack
21 _unpack = struct.unpack
21 _unpack = struct.unpack
22 _compress = zlib.compress
22 _compress = zlib.compress
23 _decompress = zlib.decompress
23 _decompress = zlib.decompress
24 _sha = util.sha1
24 _sha = util.sha1
25
25
26 # revlog header flags
26 # revlog header flags
27 REVLOGV0 = 0
27 REVLOGV0 = 0
28 REVLOGNG = 1
28 REVLOGNG = 1
29 REVLOGNGINLINEDATA = (1 << 16)
29 REVLOGNGINLINEDATA = (1 << 16)
30 REVLOGGENERALDELTA = (1 << 17)
30 REVLOGGENERALDELTA = (1 << 17)
31 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
31 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
32 REVLOG_DEFAULT_FORMAT = REVLOGNG
32 REVLOG_DEFAULT_FORMAT = REVLOGNG
33 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
33 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
34 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
34 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
35
35
36 # revlog index flags
36 # revlog index flags
37 REVIDX_KNOWN_FLAGS = 0
37 REVIDX_KNOWN_FLAGS = 0
38
38
39 # max size of revlog with inline data
39 # max size of revlog with inline data
40 _maxinline = 131072
40 _maxinline = 131072
41 _chunksize = 1048576
41 _chunksize = 1048576
42
42
43 RevlogError = error.RevlogError
43 RevlogError = error.RevlogError
44 LookupError = error.LookupError
44 LookupError = error.LookupError
45
45
46 def getoffset(q):
46 def getoffset(q):
47 return int(q >> 16)
47 return int(q >> 16)
48
48
49 def gettype(q):
49 def gettype(q):
50 return int(q & 0xFFFF)
50 return int(q & 0xFFFF)
51
51
52 def offset_type(offset, type):
52 def offset_type(offset, type):
53 return long(long(offset) << 16 | type)
53 return long(long(offset) << 16 | type)
54
54
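# The first field of every index entry packs a 48-bit data-file offset and
# 16 bits of per-revision flags into one integer; getoffset and gettype
# above undo the packing. A quick round trip (flag bit 0 is made up for
# illustration -- REVIDX_KNOWN_FLAGS is still 0):
packed = offset_type(12345, 1)
assert getoffset(packed) == 12345
assert gettype(packed) == 1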
55 nullhash = _sha(nullid)
55 nullhash = _sha(nullid)
56
56
57 def hash(text, p1, p2):
57 def hash(text, p1, p2):
58 """generate a hash from the given text and its parent hashes
58 """generate a hash from the given text and its parent hashes
59
59
60 This hash combines both the current file contents and its history
60 This hash combines both the current file contents and its history
61 in a manner that makes it easy to distinguish nodes with the same
61 in a manner that makes it easy to distinguish nodes with the same
62 content in the revision graph.
62 content in the revision graph.
63 """
63 """
64 # As of now, if one of the parent nodes is null, p2 is null
64 # As of now, if one of the parent nodes is null, p2 is null
65 if p2 == nullid:
65 if p2 == nullid:
66 # deep copy of a hash is faster than creating one
66 # deep copy of a hash is faster than creating one
67 s = nullhash.copy()
67 s = nullhash.copy()
68 s.update(p1)
68 s.update(p1)
69 else:
69 else:
70 # none of the parent nodes are nullid
70 # none of the parent nodes are nullid
71 l = [p1, p2]
71 l = [p1, p2]
72 l.sort()
72 l.sort()
73 s = _sha(l[0])
73 s = _sha(l[0])
74 s.update(l[1])
74 s.update(l[1])
75 s.update(text)
75 s.update(text)
76 return s.digest()
76 return s.digest()
77
77
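# Because the parents participate in the SHA-1, identical text reached
# through a different history yields a different node id, while parent
# order never matters. A small illustration, assuming the hash() and
# nullid defined above:
n1 = hash('contents\n', nullid, nullid)
n2 = hash('contents\n', n1, nullid)
assert n1 != n2                                # same text, new history
assert hash('x', n1, n2) == hash('x', n2, n1)  # parents are sorted first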
78 def compress(text):
78 def compress(text):
79 """ generate a possibly-compressed representation of text """
79 """ generate a possibly-compressed representation of text """
80 if not text:
80 if not text:
81 return ("", text)
81 return ("", text)
82 l = len(text)
82 l = len(text)
83 bin = None
83 bin = None
84 if l < 44:
84 if l < 44:
85 pass
85 pass
86 elif l > 1000000:
86 elif l > 1000000:
87 # zlib makes an internal copy, thus doubling memory usage for
87 # zlib makes an internal copy, thus doubling memory usage for
88 # large files, so let's do this in pieces
88 # large files, so let's do this in pieces
89 z = zlib.compressobj()
89 z = zlib.compressobj()
90 p = []
90 p = []
91 pos = 0
91 pos = 0
92 while pos < l:
92 while pos < l:
93 pos2 = pos + 2**20
93 pos2 = pos + 2**20
94 p.append(z.compress(text[pos:pos2]))
94 p.append(z.compress(text[pos:pos2]))
95 pos = pos2
95 pos = pos2
96 p.append(z.flush())
96 p.append(z.flush())
97 if sum(map(len, p)) < l:
97 if sum(map(len, p)) < l:
98 bin = "".join(p)
98 bin = "".join(p)
99 else:
99 else:
100 bin = _compress(text)
100 bin = _compress(text)
101 if bin is None or len(bin) > l:
101 if bin is None or len(bin) > l:
102 if text[0] == '\0':
102 if text[0] == '\0':
103 return ("", text)
103 return ("", text)
104 return ('u', text)
104 return ('u', text)
105 return ("", bin)
105 return ("", bin)
106
106
107 def decompress(bin):
107 def decompress(bin):
108 """ decompress the given input """
108 """ decompress the given input """
109 if not bin:
109 if not bin:
110 return bin
110 return bin
111 t = bin[0]
111 t = bin[0]
112 if t == '\0':
112 if t == '\0':
113 return bin
113 return bin
114 if t == 'x':
114 if t == 'x':
115 return _decompress(bin)
115 return _decompress(bin)
116 if t == 'u':
116 if t == 'u':
117 return bin[1:]
117 return bin[1:]
118 raise RevlogError(_("unknown compression type %r") % t)
118 raise RevlogError(_("unknown compression type %r") % t)
119
119
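# On disk a chunk is header + data, and decompress dispatches on the first
# byte: 'u' marks stored-literal text, '\0' marks text that already begins
# with NUL (also stored literally), and anything else is a zlib stream
# (whose first byte is 'x'). A round trip through the two functions above:
h, d = compress('some revision text ' * 10)  # long and repetitive: zlib wins
assert h == '' and d.startswith('x')
assert decompress(h + d) == 'some revision text ' * 10

h, d = compress('tiny')                      # under 44 bytes: stored as-is
assert (h, d) == ('u', 'tiny')
assert decompress(h + d) == 'tiny'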
120 indexformatv0 = ">4l20s20s20s"
120 indexformatv0 = ">4l20s20s20s"
121 v0shaoffset = 56
121 v0shaoffset = 56
122
122
123 class revlogoldio(object):
123 class revlogoldio(object):
124 def __init__(self):
124 def __init__(self):
125 self.size = struct.calcsize(indexformatv0)
125 self.size = struct.calcsize(indexformatv0)
126
126
127 def parseindex(self, data, inline):
127 def parseindex(self, data, inline):
128 s = self.size
128 s = self.size
129 index = []
129 index = []
130 nodemap = {nullid: nullrev}
130 nodemap = {nullid: nullrev}
131 n = off = 0
131 n = off = 0
132 l = len(data)
132 l = len(data)
133 while off + s <= l:
133 while off + s <= l:
134 cur = data[off:off + s]
134 cur = data[off:off + s]
135 off += s
135 off += s
136 e = _unpack(indexformatv0, cur)
136 e = _unpack(indexformatv0, cur)
137 # transform to revlogv1 format
137 # transform to revlogv1 format
138 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
138 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
139 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
139 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
140 index.append(e2)
140 index.append(e2)
141 nodemap[e[6]] = n
141 nodemap[e[6]] = n
142 n += 1
142 n += 1
143
143
144 # add the magic null revision at -1
144 # add the magic null revision at -1
145 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
145 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
146
146
147 return index, nodemap, None
147 return index, nodemap, None
148
148
149 def packentry(self, entry, node, version, rev):
149 def packentry(self, entry, node, version, rev):
150 if gettype(entry[0]):
150 if gettype(entry[0]):
151 raise RevlogError(_("index entry flags need RevlogNG"))
151 raise RevlogError(_("index entry flags need RevlogNG"))
152 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
152 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
153 node(entry[5]), node(entry[6]), entry[7])
153 node(entry[5]), node(entry[6]), entry[7])
154 return _pack(indexformatv0, *e2)
154 return _pack(indexformatv0, *e2)
155
155
156 # index ng:
156 # index ng:
157 # 6 bytes: offset
157 # 6 bytes: offset
158 # 2 bytes: flags
158 # 2 bytes: flags
159 # 4 bytes: compressed length
159 # 4 bytes: compressed length
160 # 4 bytes: uncompressed length
160 # 4 bytes: uncompressed length
161 # 4 bytes: base rev
161 # 4 bytes: base rev
162 # 4 bytes: link rev
162 # 4 bytes: link rev
163 # 4 bytes: parent 1 rev
163 # 4 bytes: parent 1 rev
164 # 4 bytes: parent 2 rev
164 # 4 bytes: parent 2 rev
165 # 32 bytes: nodeid
165 # 32 bytes: nodeid
166 indexformatng = ">Qiiiiii20s12x"
166 indexformatng = ">Qiiiiii20s12x"
167 ngshaoffset = 32
167 ngshaoffset = 32
168 versionformat = ">I"
168 versionformat = ">I"
169
169
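# A RevlogNG index record is exactly 64 bytes: 8 (offset/flags) + 4 * 6
# (lengths, base, link, parents) + 20 (nodeid) + 12 (padding). A quick
# check against the format string above; the entry values are made up:
import struct
entry = (offset_type(0, 0), 11, 11, 0, 0, -1, -1, '\x00' * 20)
record = struct.pack(indexformatng, *entry)
assert struct.calcsize(indexformatng) == len(record) == 64
assert struct.unpack(indexformatng, record) == entry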
170 class revlogio(object):
170 class revlogio(object):
171 def __init__(self):
171 def __init__(self):
172 self.size = struct.calcsize(indexformatng)
172 self.size = struct.calcsize(indexformatng)
173
173
174 def parseindex(self, data, inline):
174 def parseindex(self, data, inline):
175 # call the C implementation to parse the index data
175 # call the C implementation to parse the index data
176 index, cache = parsers.parse_index2(data, inline)
176 index, cache = parsers.parse_index2(data, inline)
177 return index, None, cache
177 return index, None, cache
178
178
179 def packentry(self, entry, node, version, rev):
179 def packentry(self, entry, node, version, rev):
180 p = _pack(indexformatng, *entry)
180 p = _pack(indexformatng, *entry)
181 if rev == 0:
181 if rev == 0:
182 p = _pack(versionformat, version) + p[4:]
182 p = _pack(versionformat, version) + p[4:]
183 return p
183 return p
184
184
185 class revlog(object):
185 class revlog(object):
186 """
186 """
187 the underlying revision storage object
187 the underlying revision storage object
188
188
189 A revlog consists of two parts, an index and the revision data.
189 A revlog consists of two parts, an index and the revision data.
190
190
191 The index is a file with a fixed record size containing
191 The index is a file with a fixed record size containing
192 information on each revision, including its nodeid (hash), the
192 information on each revision, including its nodeid (hash), the
193 nodeids of its parents, the position and offset of its data within
193 nodeids of its parents, the position and offset of its data within
194 the data file, and the revision it's based on. Finally, each entry
194 the data file, and the revision it's based on. Finally, each entry
195 contains a linkrev entry that can serve as a pointer to external
195 contains a linkrev entry that can serve as a pointer to external
196 data.
196 data.
197
197
198 The revision data itself is a linear collection of data chunks.
198 The revision data itself is a linear collection of data chunks.
199 Each chunk represents a revision and is usually represented as a
199 Each chunk represents a revision and is usually represented as a
200 delta against the previous chunk. To bound lookup time, runs of
200 delta against the previous chunk. To bound lookup time, runs of
201 deltas are limited to about 2 times the length of the original
201 deltas are limited to about 2 times the length of the original
202 version data. This makes retrieval of a version proportional to
202 version data. This makes retrieval of a version proportional to
203 its size, or O(1) relative to the number of revisions.
203 its size, or O(1) relative to the number of revisions.
204
204
205 Both pieces of the revlog are written to in an append-only
205 Both pieces of the revlog are written to in an append-only
206 fashion, which means we never need to rewrite a file to insert or
206 fashion, which means we never need to rewrite a file to insert or
207 remove data, and can use some simple techniques to avoid the need
207 remove data, and can use some simple techniques to avoid the need
208 for locking while reading.
208 for locking while reading.
209 """
209 """
210 def __init__(self, opener, indexfile):
210 def __init__(self, opener, indexfile):
211 """
211 """
212 create a revlog object
212 create a revlog object
213
213
214 opener is a function that abstracts the file opening operation
214 opener is a function that abstracts the file opening operation
215 and can be used to implement COW semantics or the like.
215 and can be used to implement COW semantics or the like.
216 """
216 """
217 self.indexfile = indexfile
217 self.indexfile = indexfile
218 self.datafile = indexfile[:-2] + ".d"
218 self.datafile = indexfile[:-2] + ".d"
219 self.opener = opener
219 self.opener = opener
220 self._cache = None
220 self._cache = None
221 self._basecache = (0, 0)
221 self._basecache = (0, 0)
222 self._chunkcache = (0, '')
222 self._chunkcache = (0, '')
223 self.index = []
223 self.index = []
224 self._pcache = {}
224 self._pcache = {}
225 self._nodecache = {nullid: nullrev}
225 self._nodecache = {nullid: nullrev}
226 self._nodepos = None
226 self._nodepos = None
227
227
228 v = REVLOG_DEFAULT_VERSION
228 v = REVLOG_DEFAULT_VERSION
229 if hasattr(opener, 'options'):
229 if hasattr(opener, 'options'):
230 if 'revlogv1' in opener.options:
230 if 'revlogv1' in opener.options:
231 if 'generaldelta' in opener.options:
231 if 'generaldelta' in opener.options:
232 v |= REVLOGGENERALDELTA
232 v |= REVLOGGENERALDELTA
233 else:
233 else:
234 v = 0
234 v = 0
235
235
236 i = ''
236 i = ''
237 self._initempty = True
237 self._initempty = True
238 try:
238 try:
239 f = self.opener(self.indexfile)
239 f = self.opener(self.indexfile)
240 i = f.read()
240 i = f.read()
241 f.close()
241 f.close()
242 if len(i) > 0:
242 if len(i) > 0:
243 v = struct.unpack(versionformat, i[:4])[0]
243 v = struct.unpack(versionformat, i[:4])[0]
244 self._initempty = False
244 self._initempty = False
245 except IOError, inst:
245 except IOError, inst:
246 if inst.errno != errno.ENOENT:
246 if inst.errno != errno.ENOENT:
247 raise
247 raise
248
248
249 self.version = v
249 self.version = v
250 self._inline = v & REVLOGNGINLINEDATA
250 self._inline = v & REVLOGNGINLINEDATA
251 self._generaldelta = v & REVLOGGENERALDELTA
251 self._generaldelta = v & REVLOGGENERALDELTA
252 flags = v & ~0xFFFF
252 flags = v & ~0xFFFF
253 fmt = v & 0xFFFF
253 fmt = v & 0xFFFF
254 if fmt == REVLOGV0 and flags:
254 if fmt == REVLOGV0 and flags:
255 raise RevlogError(_("index %s unknown flags %#04x for format v0")
255 raise RevlogError(_("index %s unknown flags %#04x for format v0")
256 % (self.indexfile, flags >> 16))
256 % (self.indexfile, flags >> 16))
257 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
257 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
258 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
258 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
259 % (self.indexfile, flags >> 16))
259 % (self.indexfile, flags >> 16))
260 elif fmt > REVLOGNG:
260 elif fmt > REVLOGNG:
261 raise RevlogError(_("index %s unknown format %d")
261 raise RevlogError(_("index %s unknown format %d")
262 % (self.indexfile, fmt))
262 % (self.indexfile, fmt))
263
263
264 self._io = revlogio()
264 self._io = revlogio()
265 if self.version == REVLOGV0:
265 if self.version == REVLOGV0:
266 self._io = revlogoldio()
266 self._io = revlogoldio()
267 try:
267 try:
268 d = self._io.parseindex(i, self._inline)
268 d = self._io.parseindex(i, self._inline)
269 except (ValueError, IndexError):
269 except (ValueError, IndexError):
270 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
270 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
271 self.index, nodemap, self._chunkcache = d
271 self.index, nodemap, self._chunkcache = d
272 if nodemap is not None:
272 if nodemap is not None:
273 self.nodemap = self._nodecache = nodemap
273 self.nodemap = self._nodecache = nodemap
274 if not self._chunkcache:
274 if not self._chunkcache:
275 self._chunkclear()
275 self._chunkclear()
276
276
277 def tip(self):
277 def tip(self):
278 return self.node(len(self.index) - 2)
278 return self.node(len(self.index) - 2)
279 def __len__(self):
279 def __len__(self):
280 return len(self.index) - 1
280 return len(self.index) - 1
281 def __iter__(self):
281 def __iter__(self):
282 for i in xrange(len(self)):
282 for i in xrange(len(self)):
283 yield i
283 yield i
284
284
285 @util.propertycache
285 @util.propertycache
286 def nodemap(self):
286 def nodemap(self):
287 self.rev(self.node(0))
287 self.rev(self.node(0))
288 return self._nodecache
288 return self._nodecache
289
289
290 def rev(self, node):
290 def rev(self, node):
291 try:
291 try:
292 return self._nodecache[node]
292 return self._nodecache[node]
293 except KeyError:
293 except KeyError:
294 n = self._nodecache
294 n = self._nodecache
295 i = self.index
295 i = self.index
296 p = self._nodepos
296 p = self._nodepos
297 if p is None:
297 if p is None:
298 p = len(i) - 2
298 p = len(i) - 2
299 for r in xrange(p, -1, -1):
299 for r in xrange(p, -1, -1):
300 v = i[r][7]
300 v = i[r][7]
301 n[v] = r
301 n[v] = r
302 if v == node:
302 if v == node:
303 self._nodepos = r - 1
303 self._nodepos = r - 1
304 return r
304 return r
305 raise LookupError(node, self.indexfile, _('no node'))
305 raise LookupError(node, self.indexfile, _('no node'))
306
306
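# On a nodemap miss, rev() scans the index backwards from _nodepos and
# memoizes every node it walks past, so a burst of misses costs one
# partial scan instead of many. The core of that loop with the names made
# explicit (findrev is illustrative, not Mercurial API):
def findrev(index, cache, node, startpos):
    for r in xrange(startpos, -1, -1):
        cache[index[r][7]] = r     # memoize everything on the way down
        if index[r][7] == node:
            return r               # the next miss resumes below r
    raise KeyError(node)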
307 def node(self, rev):
307 def node(self, rev):
308 return self.index[rev][7]
308 return self.index[rev][7]
309 def linkrev(self, rev):
309 def linkrev(self, rev):
310 return self.index[rev][4]
310 return self.index[rev][4]
311 def parents(self, node):
311 def parents(self, node):
312 i = self.index
312 i = self.index
313 d = i[self.rev(node)]
313 d = i[self.rev(node)]
314 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
314 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
315 def parentrevs(self, rev):
315 def parentrevs(self, rev):
316 return self.index[rev][5:7]
316 return self.index[rev][5:7]
317 def start(self, rev):
317 def start(self, rev):
318 return int(self.index[rev][0] >> 16)
318 return int(self.index[rev][0] >> 16)
319 def end(self, rev):
319 def end(self, rev):
320 return self.start(rev) + self.length(rev)
320 return self.start(rev) + self.length(rev)
321 def length(self, rev):
321 def length(self, rev):
322 return self.index[rev][1]
322 return self.index[rev][1]
323 def base(self, rev):
323 def base(self, rev):
324 return self.index[rev][3]
324 return self.index[rev][3]
325 def chainbase(self, rev):
325 def chainbase(self, rev):
326 index = self.index
326 index = self.index
327 base = index[rev][3]
327 base = index[rev][3]
328 while base != rev:
328 while base != rev:
329 rev = base
329 rev = base
330 base = index[rev][3]
330 base = index[rev][3]
331 return base
331 return base
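# base(rev) names an earlier revision on rev's delta chain (with
# generaldelta, the direct delta parent), and chainbase follows the chain
# down to the full-snapshot revision. A sketch against any revlog
# instance rl and revision rev (both assumed):
base = rl.chainbase(rev)
assert rl.index[base][3] == base   # a snapshot is its own base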
332 def flags(self, rev):
332 def flags(self, rev):
333 return self.index[rev][0] & 0xFFFF
333 return self.index[rev][0] & 0xFFFF
334 def rawsize(self, rev):
334 def rawsize(self, rev):
335 """return the length of the uncompressed text for a given revision"""
335 """return the length of the uncompressed text for a given revision"""
336 l = self.index[rev][2]
336 l = self.index[rev][2]
337 if l >= 0:
337 if l >= 0:
338 return l
338 return l
339
339
340 t = self.revision(self.node(rev))
340 t = self.revision(self.node(rev))
341 return len(t)
341 return len(t)
342 size = rawsize
342 size = rawsize
343
343
344 def reachable(self, node, stop=None):
344 def reachable(self, node, stop=None):
345 """return the set of all nodes ancestral to a given node, including
345 """return the set of all nodes ancestral to a given node, including
346 the node itself, stopping when stop is matched"""
346 the node itself, stopping when stop is matched"""
347 reachable = set((node,))
347 reachable = set((node,))
348 visit = [node]
348 visit = [node]
349 if stop:
349 if stop:
350 stopn = self.rev(stop)
350 stopn = self.rev(stop)
351 else:
351 else:
352 stopn = 0
352 stopn = 0
353 while visit:
353 while visit:
354 n = visit.pop(0)
354 n = visit.pop(0)
355 if n == stop:
355 if n == stop:
356 continue
356 continue
357 if n == nullid:
357 if n == nullid:
358 continue
358 continue
359 for p in self.parents(n):
359 for p in self.parents(n):
360 if self.rev(p) < stopn:
360 if self.rev(p) < stopn:
361 continue
361 continue
362 if p not in reachable:
362 if p not in reachable:
363 reachable.add(p)
363 reachable.add(p)
364 visit.append(p)
364 visit.append(p)
365 return reachable
365 return reachable
366
366
367 def ancestors(self, *revs):
367 def ancestors(self, *revs):
368 """Generate the ancestors of 'revs' in reverse topological order.
368 """Generate the ancestors of 'revs' in reverse topological order.
369
369
370 Yield a sequence of revision numbers starting with the parents
370 Yield a sequence of revision numbers starting with the parents
371 of each revision in revs, i.e., each revision is *not* considered
371 of each revision in revs, i.e., each revision is *not* considered
372 an ancestor of itself. Results are in breadth-first order:
372 an ancestor of itself. Results are in breadth-first order:
373 parents of each rev in revs, then parents of those, etc. Result
373 parents of each rev in revs, then parents of those, etc. Result
374 does not include the null revision."""
374 does not include the null revision."""
375 visit = list(revs)
375 visit = list(revs)
376 seen = set([nullrev])
376 seen = set([nullrev])
377 while visit:
377 while visit:
378 for parent in self.parentrevs(visit.pop(0)):
378 for parent in self.parentrevs(visit.pop(0)):
379 if parent not in seen:
379 if parent not in seen:
380 visit.append(parent)
380 visit.append(parent)
381 seen.add(parent)
381 seen.add(parent)
382 yield parent
382 yield parent
383
383
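# The traversal above is breadth-first, never yields a member of the
# input set itself, and filters out nullrev via the pre-seeded 'seen'
# set. The same loop on a toy parent table (rev -> (p1, p2), with
# nullrev == -1); parenttab and toyancestors are illustrative:
parenttab = {0: (-1, -1), 1: (0, -1), 2: (1, -1)}

def toyancestors(*revs):
    visit = list(revs)
    seen = set([-1])
    while visit:
        for parent in parenttab[visit.pop(0)]:
            if parent not in seen:
                visit.append(parent)
                seen.add(parent)
                yield parent

assert list(toyancestors(2)) == [1, 0]  # parents first, then grandparents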
384 def descendants(self, *revs):
384 def descendants(self, *revs):
385 """Generate the descendants of 'revs' in revision order.
385 """Generate the descendants of 'revs' in revision order.
386
386
387 Yield a sequence of revision numbers starting with a child of
387 Yield a sequence of revision numbers starting with a child of
388 some rev in revs, i.e., each revision is *not* considered a
388 some rev in revs, i.e., each revision is *not* considered a
389 descendant of itself. Results are ordered by revision number (a
389 descendant of itself. Results are ordered by revision number (a
390 topological sort)."""
390 topological sort)."""
391 first = min(revs)
391 first = min(revs)
392 if first == nullrev:
392 if first == nullrev:
393 for i in self:
393 for i in self:
394 yield i
394 yield i
395 return
395 return
396
396
397 seen = set(revs)
397 seen = set(revs)
398 for i in xrange(first + 1, len(self)):
398 for i in xrange(first + 1, len(self)):
399 for x in self.parentrevs(i):
399 for x in self.parentrevs(i):
400 if x != nullrev and x in seen:
400 if x != nullrev and x in seen:
401 seen.add(i)
401 seen.add(i)
402 yield i
402 yield i
403 break
403 break
404
404
405 def findcommonmissing(self, common=None, heads=None):
405 def findcommonmissing(self, common=None, heads=None):
406 """Return a tuple of the ancestors of common and the ancestors of heads
406 """Return a tuple of the ancestors of common and the ancestors of heads
407 that are not ancestors of common.
407 that are not ancestors of common.
408
408
409 More specifically, the second element is a list of nodes N such that
409 More specifically, the second element is a list of nodes N such that
410 every N satisfies the following constraints:
410 every N satisfies the following constraints:
411
411
412 1. N is an ancestor of some node in 'heads'
412 1. N is an ancestor of some node in 'heads'
413 2. N is not an ancestor of any node in 'common'
413 2. N is not an ancestor of any node in 'common'
414
414
415 The list is sorted by revision number, meaning it is
415 The list is sorted by revision number, meaning it is
416 topologically sorted.
416 topologically sorted.
417
417
418 'heads' and 'common' are both lists of node IDs. If heads is
418 'heads' and 'common' are both lists of node IDs. If heads is
419 not supplied, uses all of the revlog's heads. If common is not
419 not supplied, uses all of the revlog's heads. If common is not
420 supplied, uses nullid."""
420 supplied, uses nullid."""
421 if common is None:
421 if common is None:
422 common = [nullid]
422 common = [nullid]
423 if heads is None:
423 if heads is None:
424 heads = self.heads()
424 heads = self.heads()
425
425
426 common = [self.rev(n) for n in common]
426 common = [self.rev(n) for n in common]
427 heads = [self.rev(n) for n in heads]
427 heads = [self.rev(n) for n in heads]
428
428
429 # we want the ancestors, but inclusive
429 # we want the ancestors, but inclusive
430 has = set(self.ancestors(*common))
430 has = set(self.ancestors(*common))
431 has.add(nullrev)
431 has.add(nullrev)
432 has.update(common)
432 has.update(common)
433
433
434 # take all ancestors from heads that aren't in has
434 # take all ancestors from heads that aren't in has
435 missing = set()
435 missing = set()
436 visit = [r for r in heads if r not in has]
436 visit = [r for r in heads if r not in has]
437 while visit:
437 while visit:
438 r = visit.pop(0)
438 r = visit.pop(0)
439 if r in missing:
439 if r in missing:
440 continue
440 continue
441 else:
441 else:
442 missing.add(r)
442 missing.add(r)
443 for p in self.parentrevs(r):
443 for p in self.parentrevs(r):
444 if p not in has:
444 if p not in has:
445 visit.append(p)
445 visit.append(p)
446 missing = list(missing)
446 missing = list(missing)
447 missing.sort()
447 missing.sort()
448 return has, [self.node(r) for r in missing]
448 return has, [self.node(r) for r in missing]
449
449
450 def findmissing(self, common=None, heads=None):
450 def findmissing(self, common=None, heads=None):
451 """Return the ancestors of heads that are not ancestors of common.
451 """Return the ancestors of heads that are not ancestors of common.
452
452
453 More specifically, return a list of nodes N such that every N
453 More specifically, return a list of nodes N such that every N
454 satisfies the following constraints:
454 satisfies the following constraints:
455
455
456 1. N is an ancestor of some node in 'heads'
456 1. N is an ancestor of some node in 'heads'
457 2. N is not an ancestor of any node in 'common'
457 2. N is not an ancestor of any node in 'common'
458
458
459 The list is sorted by revision number, meaning it is
459 The list is sorted by revision number, meaning it is
460 topologically sorted.
460 topologically sorted.
461
461
462 'heads' and 'common' are both lists of node IDs. If heads is
462 'heads' and 'common' are both lists of node IDs. If heads is
463 not supplied, uses all of the revlog's heads. If common is not
463 not supplied, uses all of the revlog's heads. If common is not
464 supplied, uses nullid."""
464 supplied, uses nullid."""
465 _common, missing = self.findcommonmissing(common, heads)
465 _common, missing = self.findcommonmissing(common, heads)
466 return missing
466 return missing
467
467
468 def nodesbetween(self, roots=None, heads=None):
468 def nodesbetween(self, roots=None, heads=None):
469 """Return a topological path from 'roots' to 'heads'.
469 """Return a topological path from 'roots' to 'heads'.
470
470
471 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
471 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
472 topologically sorted list of all nodes N that satisfy both of
472 topologically sorted list of all nodes N that satisfy both of
473 these constraints:
473 these constraints:
474
474
475 1. N is a descendant of some node in 'roots'
475 1. N is a descendant of some node in 'roots'
476 2. N is an ancestor of some node in 'heads'
476 2. N is an ancestor of some node in 'heads'
477
477
478 Every node is considered to be both a descendant and an ancestor
478 Every node is considered to be both a descendant and an ancestor
479 of itself, so every reachable node in 'roots' and 'heads' will be
479 of itself, so every reachable node in 'roots' and 'heads' will be
480 included in 'nodes'.
480 included in 'nodes'.
481
481
482 'outroots' is the list of reachable nodes in 'roots', i.e., the
482 'outroots' is the list of reachable nodes in 'roots', i.e., the
483 subset of 'roots' that is returned in 'nodes'. Likewise,
483 subset of 'roots' that is returned in 'nodes'. Likewise,
484 'outheads' is the subset of 'heads' that is also in 'nodes'.
484 'outheads' is the subset of 'heads' that is also in 'nodes'.
485
485
486 'roots' and 'heads' are both lists of node IDs. If 'roots' is
486 'roots' and 'heads' are both lists of node IDs. If 'roots' is
487 unspecified, uses nullid as the only root. If 'heads' is
487 unspecified, uses nullid as the only root. If 'heads' is
488 unspecified, uses list of all of the revlog's heads."""
488 unspecified, uses list of all of the revlog's heads."""
489 nonodes = ([], [], [])
489 nonodes = ([], [], [])
490 if roots is not None:
490 if roots is not None:
491 roots = list(roots)
491 roots = list(roots)
492 if not roots:
492 if not roots:
493 return nonodes
493 return nonodes
494 lowestrev = min([self.rev(n) for n in roots])
494 lowestrev = min([self.rev(n) for n in roots])
495 else:
495 else:
496 roots = [nullid] # Everybody's a descendent of nullid
496 roots = [nullid] # Everybody's a descendent of nullid
497 lowestrev = nullrev
497 lowestrev = nullrev
498 if (lowestrev == nullrev) and (heads is None):
498 if (lowestrev == nullrev) and (heads is None):
499 # We want _all_ the nodes!
499 # We want _all_ the nodes!
500 return ([self.node(r) for r in self], [nullid], list(self.heads()))
500 return ([self.node(r) for r in self], [nullid], list(self.heads()))
501 if heads is None:
501 if heads is None:
502 # All nodes are ancestors, so the latest ancestor is the last
502 # All nodes are ancestors, so the latest ancestor is the last
503 # node.
503 # node.
504 highestrev = len(self) - 1
504 highestrev = len(self) - 1
505 # Set ancestors to None to signal that every node is an ancestor.
505 # Set ancestors to None to signal that every node is an ancestor.
506 ancestors = None
506 ancestors = None
507 # Set heads to an empty dictionary for later discovery of heads
507 # Set heads to an empty dictionary for later discovery of heads
508 heads = {}
508 heads = {}
509 else:
509 else:
510 heads = list(heads)
510 heads = list(heads)
511 if not heads:
511 if not heads:
512 return nonodes
512 return nonodes
513 ancestors = set()
513 ancestors = set()
514 # Turn heads into a dictionary so we can remove 'fake' heads.
514 # Turn heads into a dictionary so we can remove 'fake' heads.
515 # Also, later we will be using it to filter out the heads we can't
515 # Also, later we will be using it to filter out the heads we can't
516 # find from roots.
516 # find from roots.
517 heads = dict.fromkeys(heads, False)
517 heads = dict.fromkeys(heads, False)
518 # Start at the top and keep marking parents until we're done.
518 # Start at the top and keep marking parents until we're done.
519 nodestotag = set(heads)
519 nodestotag = set(heads)
520 # Remember where the top was so we can use it as a limit later.
520 # Remember where the top was so we can use it as a limit later.
521 highestrev = max([self.rev(n) for n in nodestotag])
521 highestrev = max([self.rev(n) for n in nodestotag])
522 while nodestotag:
522 while nodestotag:
523 # grab a node to tag
523 # grab a node to tag
524 n = nodestotag.pop()
524 n = nodestotag.pop()
525 # Never tag nullid
525 # Never tag nullid
526 if n == nullid:
526 if n == nullid:
527 continue
527 continue
528 # A node's revision number represents its place in a
528 # A node's revision number represents its place in a
529 # topologically sorted list of nodes.
529 # topologically sorted list of nodes.
530 r = self.rev(n)
530 r = self.rev(n)
531 if r >= lowestrev:
531 if r >= lowestrev:
532 if n not in ancestors:
532 if n not in ancestors:
533 # If we are possibly a descendent of one of the roots
533 # If we are possibly a descendent of one of the roots
534 # and we haven't already been marked as an ancestor
534 # and we haven't already been marked as an ancestor
535 ancestors.add(n) # Mark as ancestor
535 ancestors.add(n) # Mark as ancestor
536 # Add non-nullid parents to list of nodes to tag.
536 # Add non-nullid parents to list of nodes to tag.
537 nodestotag.update([p for p in self.parents(n) if
537 nodestotag.update([p for p in self.parents(n) if
538 p != nullid])
538 p != nullid])
539 elif n in heads: # We've seen it before, is it a fake head?
539 elif n in heads: # We've seen it before, is it a fake head?
540 # So it is; real heads should not be the ancestors of
540 # So it is; real heads should not be the ancestors of
541 # any other heads.
541 # any other heads.
542 heads.pop(n)
542 heads.pop(n)
543 if not ancestors:
543 if not ancestors:
544 return nonodes
544 return nonodes
545 # Now that we have our set of ancestors, we want to remove any
545 # Now that we have our set of ancestors, we want to remove any
546 # roots that are not ancestors.
546 # roots that are not ancestors.
547
547
548 # If one of the roots was nullid, everything is included anyway.
548 # If one of the roots was nullid, everything is included anyway.
549 if lowestrev > nullrev:
549 if lowestrev > nullrev:
550 # But, since we weren't, let's recompute the lowest rev to not
550 # But, since we weren't, let's recompute the lowest rev to not
551 # include roots that aren't ancestors.
551 # include roots that aren't ancestors.
552
552
553 # Filter out roots that aren't ancestors of heads
553 # Filter out roots that aren't ancestors of heads
554 roots = [n for n in roots if n in ancestors]
554 roots = [n for n in roots if n in ancestors]
555 # Recompute the lowest revision
555 # Recompute the lowest revision
556 if roots:
556 if roots:
557 lowestrev = min([self.rev(n) for n in roots])
557 lowestrev = min([self.rev(n) for n in roots])
558 else:
558 else:
559 # No more roots? Return empty list
559 # No more roots? Return empty list
560 return nonodes
560 return nonodes
561 else:
561 else:
562 # We are descending from nullid, and don't need to care about
562 # We are descending from nullid, and don't need to care about
563 # any other roots.
563 # any other roots.
564 lowestrev = nullrev
564 lowestrev = nullrev
565 roots = [nullid]
565 roots = [nullid]
566 # Transform our roots list into a set.
566 # Transform our roots list into a set.
567 descendents = set(roots)
567 descendents = set(roots)
568 # Also, keep the original roots so we can filter out roots that aren't
568 # Also, keep the original roots so we can filter out roots that aren't
569 # 'real' roots (i.e. are descended from other roots).
569 # 'real' roots (i.e. are descended from other roots).
570 roots = descendents.copy()
570 roots = descendents.copy()
571 # Our topologically sorted list of output nodes.
571 # Our topologically sorted list of output nodes.
572 orderedout = []
572 orderedout = []
573 # Don't start at nullid since we don't want nullid in our output list,
573 # Don't start at nullid since we don't want nullid in our output list,
574 # and if nullid shows up in descendents, empty parents will look like
574 # and if nullid shows up in descendents, empty parents will look like
575 # they're descendents.
575 # they're descendents.
576 for r in xrange(max(lowestrev, 0), highestrev + 1):
576 for r in xrange(max(lowestrev, 0), highestrev + 1):
577 n = self.node(r)
577 n = self.node(r)
578 isdescendent = False
578 isdescendent = False
579 if lowestrev == nullrev: # Everybody is a descendent of nullid
579 if lowestrev == nullrev: # Everybody is a descendent of nullid
580 isdescendent = True
580 isdescendent = True
581 elif n in descendents:
581 elif n in descendents:
582 # n is already a descendent
582 # n is already a descendent
583 isdescendent = True
583 isdescendent = True
584 # This check only needs to be done here because all the roots
584 # This check only needs to be done here because all the roots
585 # will start being marked as descendents before the loop.
585 # will start being marked as descendents before the loop.
586 if n in roots:
586 if n in roots:
587 # If n was a root, check if it's a 'real' root.
587 # If n was a root, check if it's a 'real' root.
588 p = tuple(self.parents(n))
588 p = tuple(self.parents(n))
589 # If any of its parents are descendents, it's not a root.
589 # If any of its parents are descendents, it's not a root.
590 if (p[0] in descendents) or (p[1] in descendents):
590 if (p[0] in descendents) or (p[1] in descendents):
591 roots.remove(n)
591 roots.remove(n)
592 else:
592 else:
593 p = tuple(self.parents(n))
593 p = tuple(self.parents(n))
594 # A node is a descendent if either of its parents are
594 # A node is a descendent if either of its parents are
595 # descendents. (We seeded the descendents set with the roots
595 # descendents. (We seeded the descendents set with the roots
596 # up there, remember?)
596 # up there, remember?)
597 if (p[0] in descendents) or (p[1] in descendents):
597 if (p[0] in descendents) or (p[1] in descendents):
598 descendents.add(n)
598 descendents.add(n)
599 isdescendent = True
599 isdescendent = True
600 if isdescendent and ((ancestors is None) or (n in ancestors)):
600 if isdescendent and ((ancestors is None) or (n in ancestors)):
601 # Only include nodes that are both descendents and ancestors.
601 # Only include nodes that are both descendents and ancestors.
602 orderedout.append(n)
602 orderedout.append(n)
603 if (ancestors is not None) and (n in heads):
603 if (ancestors is not None) and (n in heads):
604 # We're trying to figure out which heads are reachable
604 # We're trying to figure out which heads are reachable
605 # from roots.
605 # from roots.
606 # Mark this head as having been reached
606 # Mark this head as having been reached
607 heads[n] = True
607 heads[n] = True
608 elif ancestors is None:
608 elif ancestors is None:
609 # Otherwise, we're trying to discover the heads.
609 # Otherwise, we're trying to discover the heads.
610 # Assume this is a head because if it isn't, the next step
610 # Assume this is a head because if it isn't, the next step
611 # will eventually remove it.
611 # will eventually remove it.
612 heads[n] = True
612 heads[n] = True
613 # But, obviously its parents aren't.
613 # But, obviously its parents aren't.
614 for p in self.parents(n):
614 for p in self.parents(n):
615 heads.pop(p, None)
615 heads.pop(p, None)
616 heads = [n for n, flag in heads.iteritems() if flag]
616 heads = [n for n, flag in heads.iteritems() if flag]
617 roots = list(roots)
617 roots = list(roots)
618 assert orderedout
618 assert orderedout
619 assert roots
619 assert roots
620 assert heads
620 assert heads
621 return (orderedout, roots, heads)
621 return (orderedout, roots, heads)
622
622
623 def headrevs(self):
623 def headrevs(self):
624 count = len(self)
624 count = len(self)
625 if not count:
625 if not count:
626 return [nullrev]
626 return [nullrev]
627 ishead = [1] * (count + 1)
627 ishead = [1] * (count + 1)
628 index = self.index
628 index = self.index
629 for r in xrange(count):
629 for r in xrange(count):
630 e = index[r]
630 e = index[r]
631 ishead[e[5]] = ishead[e[6]] = 0
631 ishead[e[5]] = ishead[e[6]] = 0
632 return [r for r in xrange(count) if ishead[r]]
632 return [r for r in xrange(count) if ishead[r]]
633
633
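# headrevs is a single linear pass: every revision that appears as some
# entry's parent is marked non-head. The array gets count + 1 slots so a
# nullrev (-1) parent lands harmlessly in the spare last slot through
# Python's negative indexing:
count = 3
ishead = [1] * (count + 1)
ishead[-1] = 0                      # what writing a nullrev parent touches
assert ishead[:count] == [1, 1, 1]  # no real revision's flag was clobbered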
634 def heads(self, start=None, stop=None):
634 def heads(self, start=None, stop=None):
635 """return the list of all nodes that have no children
635 """return the list of all nodes that have no children
636
636
637 if start is specified, only heads that are descendants of
637 if start is specified, only heads that are descendants of
638 start will be returned
638 start will be returned
639 if stop is specified, it will consider all the revs from stop
639 if stop is specified, it will consider all the revs from stop
640 as if they had no children
640 as if they had no children
641 """
641 """
642 if start is None and stop is None:
642 if start is None and stop is None:
643 if not len(self):
643 if not len(self):
644 return [nullid]
644 return [nullid]
645 return [self.node(r) for r in self.headrevs()]
645 return [self.node(r) for r in self.headrevs()]
646
646
647 if start is None:
647 if start is None:
648 start = nullid
648 start = nullid
649 if stop is None:
649 if stop is None:
650 stop = []
650 stop = []
651 stoprevs = set([self.rev(n) for n in stop])
651 stoprevs = set([self.rev(n) for n in stop])
652 startrev = self.rev(start)
652 startrev = self.rev(start)
653 reachable = set((startrev,))
653 reachable = set((startrev,))
654 heads = set((startrev,))
654 heads = set((startrev,))
655
655
656 parentrevs = self.parentrevs
656 parentrevs = self.parentrevs
657 for r in xrange(startrev + 1, len(self)):
657 for r in xrange(startrev + 1, len(self)):
658 for p in parentrevs(r):
658 for p in parentrevs(r):
659 if p in reachable:
659 if p in reachable:
660 if r not in stoprevs:
660 if r not in stoprevs:
661 reachable.add(r)
661 reachable.add(r)
662 heads.add(r)
662 heads.add(r)
663 if p in heads and p not in stoprevs:
663 if p in heads and p not in stoprevs:
664 heads.remove(p)
664 heads.remove(p)
665
665
666 return [self.node(r) for r in heads]
666 return [self.node(r) for r in heads]
667
667
668 def children(self, node):
668 def children(self, node):
669 """find the children of a given node"""
669 """find the children of a given node"""
670 c = []
670 c = []
671 p = self.rev(node)
671 p = self.rev(node)
672 for r in range(p + 1, len(self)):
672 for r in range(p + 1, len(self)):
673 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
673 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
674 if prevs:
674 if prevs:
675 for pr in prevs:
675 for pr in prevs:
676 if pr == p:
676 if pr == p:
677 c.append(self.node(r))
677 c.append(self.node(r))
678 elif p == nullrev:
678 elif p == nullrev:
679 c.append(self.node(r))
679 c.append(self.node(r))
680 return c
680 return c
681
681
682 def descendant(self, start, end):
682 def descendant(self, start, end):
683 if start == nullrev:
683 if start == nullrev:
684 return True
684 return True
685 for i in self.descendants(start):
685 for i in self.descendants(start):
686 if i == end:
686 if i == end:
687 return True
687 return True
688 elif i > end:
688 elif i > end:
689 break
689 break
690 return False
690 return False
691
691
692 def ancestor(self, a, b):
692 def ancestor(self, a, b):
693 """calculate the least common ancestor of nodes a and b"""
693 """calculate the least common ancestor of nodes a and b"""
694
694
695 # fast path, check if it is a descendant
695 # fast path, check if it is a descendant
696 a, b = self.rev(a), self.rev(b)
696 a, b = self.rev(a), self.rev(b)
697 start, end = sorted((a, b))
697 start, end = sorted((a, b))
698 if self.descendant(start, end):
698 if self.descendant(start, end):
699 return self.node(start)
699 return self.node(start)
700
700
701 def parents(rev):
701 def parents(rev):
702 return [p for p in self.parentrevs(rev) if p != nullrev]
702 return [p for p in self.parentrevs(rev) if p != nullrev]
703
703
704 c = ancestor.ancestor(a, b, parents)
704 c = ancestor.ancestor(a, b, parents)
705 if c is None:
705 if c is None:
706 return nullid
706 return nullid
707
707
708 return self.node(c)
708 return self.node(c)
709
709
710 def _match(self, id):
710 def _match(self, id):
711 if isinstance(id, (long, int)):
711 if isinstance(id, (long, int)):
712 # rev
712 # rev
713 return self.node(id)
713 return self.node(id)
714 if len(id) == 20:
714 if len(id) == 20:
715 # possibly a binary node
715 # possibly a binary node
716 # odds of a binary node being all hex in ASCII are 1 in 10**25
716 # odds of a binary node being all hex in ASCII are 1 in 10**25
717 try:
717 try:
718 node = id
718 node = id
719 self.rev(node) # quick search the index
719 self.rev(node) # quick search the index
720 return node
720 return node
721 except LookupError:
721 except LookupError:
722 pass # may be partial hex id
722 pass # may be partial hex id
723 try:
723 try:
724 # str(rev)
724 # str(rev)
725 rev = int(id)
725 rev = int(id)
726 if str(rev) != id:
726 if str(rev) != id:
727 raise ValueError
727 raise ValueError
728 if rev < 0:
728 if rev < 0:
729 rev = len(self) + rev
729 rev = len(self) + rev
730 if rev < 0 or rev >= len(self):
730 if rev < 0 or rev >= len(self):
731 raise ValueError
731 raise ValueError
732 return self.node(rev)
732 return self.node(rev)
733 except (ValueError, OverflowError):
733 except (ValueError, OverflowError):
734 pass
734 pass
735 if len(id) == 40:
735 if len(id) == 40:
736 try:
736 try:
737 # a full hex nodeid?
737 # a full hex nodeid?
738 node = bin(id)
738 node = bin(id)
739 self.rev(node)
739 self.rev(node)
740 return node
740 return node
741 except (TypeError, LookupError):
741 except (TypeError, LookupError):
742 pass
742 pass
743
743
    def _partialmatch(self, id):
        if id in self._pcache:
            return self._pcache[id]

        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                pass

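    # Illustrative note (not part of the original source): for a query
    # like id = 'd4f2a', l is 2, so prefix = bin('d4f2'); the binary
    # prefix narrows the index scan, and the hex(n).startswith(id) pass
    # then checks the odd trailing nibble against the full hex form.
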
    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def _addchunk(self, offset, data):
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _loadchunk(self, offset, length):
        if self._inline:
            df = self.opener(self.indexfile)
        else:
            df = self.opener(self.datafile)

        readahead = max(65536, length)
        df.seek(offset)
        d = df.read(readahead)
        self._addchunk(offset, d)
        if readahead > length:
            return d[:length]
        return d

    def _getchunk(self, offset, length):
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return d[cachestart:cacheend]

        return self._loadchunk(offset, length)

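    # Illustrative note (not part of the original source): with
    # _chunkcache == (100, d) and len(d) == 300, a request for
    # offset=150, length=50 is served as d[50:100]; a request for
    # offset=450 falls outside the cached window and goes through
    # _loadchunk() instead.
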
    def _chunkraw(self, startrev, endrev):
        start = self.start(startrev)
        length = self.end(endrev) - start
        if self._inline:
            start += (startrev + 1) * self._io.size
        return self._getchunk(start, length)

    def _chunk(self, rev):
        return decompress(self._chunkraw(rev, rev))

    def _chunkbase(self, rev):
        return self._chunk(rev)

    def _chunkclear(self):
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

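    # Illustrative note (not part of the original source): if the index
    # stores base entries [0, 0, 1, 1] for revs 0..3, deltaparent(3) is
    # 1 in a generaldelta revlog (the stored base) but 2 otherwise,
    # since classic revlogs always delta against the previous revision;
    # deltaparent(0) is nullrev in both cases because base == rev.
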
    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return self._chunk(rev2)

        return mdiff.textdiff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        cachedrev = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                return self._cache[2]
            cachedrev = self._cache[1]

        # look up what we need to read
        text = None
        rev = self.rev(node)

        # check rev flags
        if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

        # build delta chain
        chain = []
        index = self.index # for performance
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != cachedrev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]
        chain.reverse()
        base = iterrev

        if iterrev == cachedrev:
            # cache hit
            text = self._cache[2]

        # drop cache to save memory
        self._cache = None

        self._chunkraw(base, rev)
        if text is None:
            text = self._chunkbase(base)

        bins = [self._chunk(r) for r in chain]
        text = mdiff.patches(text, bins)

        text = self._checkhash(text, node, rev)

        self._cache = (node, rev, text)
        return text

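    # Illustrative note (not part of the original source): in a
    # generaldelta revlog where rev 5 deltas against 3 and 3 against 2,
    # the loop above collects revs 5 then 3 and reverses them, giving
    # chain == [3, 5] with base == 2; the full text is then rebuilt as
    # mdiff.patches(text(2), [delta(3), delta(5)]).
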
    def _checkhash(self, text, node, rev):
        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.indexfile, rev))
        return text

    def checkinlinesize(self, tr, fp=None):
        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        df = self.opener(self.datafile, 'w')
        try:
            for r in self:
                df.write(self._chunkraw(r, r))
        finally:
            df.close()

        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        """
        node = hash(text, p1, p2)
        if node in self.nodemap:
            return node

        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a+")
        try:
            return self._addrevision(node, text, transaction, link, p1, p2,
                                     cachedelta, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def _addrevision(self, node, text, transaction, link, p1, p2,
                     cachedelta, ifh, dfh):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.
        invariants:
        - text is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        btext = [text]
        def buildtext():
            if btext[0] is not None:
                return btext[0]
            # flush any pending writes here so we can read it in revision
            if dfh:
                dfh.flush()
            ifh.flush()
            basetext = self.revision(self.node(cachedelta[0]))
            btext[0] = mdiff.patch(basetext, cachedelta[1])
            chk = hash(btext[0], p1, p2)
            if chk != node:
                raise RevlogError(_("consistency error in delta"))
            return btext[0]

        def builddelta(rev):
            # can we use the cached delta?
            if cachedelta and cachedelta[0] == rev:
                delta = cachedelta[1]
            else:
                t = buildtext()
                ptext = self.revision(self.node(rev))
                delta = mdiff.textdiff(ptext, t)
            data = compress(delta)
            l = len(data[1]) + len(data[0])
            if basecache[0] == rev:
                chainbase = basecache[1]
            else:
                chainbase = self.chainbase(rev)
            dist = l + offset - self.start(chainbase)
            if self._generaldelta:
                base = rev
            else:
                base = chainbase
            return dist, l, data, base, chainbase

        curr = len(self)
        prev = curr - 1
        base = chainbase = curr
        offset = self.end(prev)
        flags = 0
        d = None
        basecache = self._basecache
        p1r, p2r = self.rev(p1), self.rev(p2)

        # should we try to build a delta?
        if prev != nullrev:
            if self._generaldelta:
                if p1r >= basecache[1]:
                    d = builddelta(p1r)
                elif p2r >= basecache[1]:
                    d = builddelta(p2r)
                else:
                    d = builddelta(prev)
            else:
                d = builddelta(prev)
            dist, l, data, base, chainbase = d

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if text is None:
            textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
                                        cachedelta[1])
        else:
            textlen = len(text)
        if d is None or dist > textlen * 2:
            text = buildtext()
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = chainbase = curr

        e = (offset_type(offset, flags), l, textlen,
             base, link, p1r, p2r, node)
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        if type(text) == str: # only accept immutable objects
            self._cache = (node, curr, text)
        self._basecache = (curr, chainbase)
        return node

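    # Illustrative note (not part of the original source): the
    # "dist > textlen * 2" cutoff above means a 1000-byte revision keeps
    # using deltas while the compressed chain measured from its base
    # stays under 2000 bytes; beyond that a full version is stored and
    # the chain restarts at this revision.
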
    def group(self, nodelist, bundler, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0]; the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. If firstparent is nullrev, the changegroup
        starts with a full revision.
        """

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (self._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(self)
            revs = set(self.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([self.rev(n) for n in nodelist])

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield bundler.close()
            return

        # add the parent of the first rev
        p = self.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        for r in xrange(len(revs) - 1):
            prev, curr = revs[r], revs[r + 1]
            for c in bundler.revchunk(self, curr, prev):
                yield c

        yield bundler.close()

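    # Illustrative note (not part of the original source; dagutil is
    # assumed to be imported at the top of this module as part of this
    # changeset): revchunk() deltas each rev against the one emitted
    # just before it, so an order in which most revs directly follow
    # their delta parent lets stored generaldelta deltas be reused via
    # revdiff() instead of being recomputed. E.g. for two interleaved
    # branches {2, 3, 4, 5} where 4 deltas against 2 and 5 against 3, a
    # linearized order such as [2, 4, 3, 5] avoids the cross-branch
    # diffs that the sorted order [2, 3, 4, 5] would force.
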
    def addgroup(self, bundle, linkmapper, transaction):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        # track the base of the current delta log
        node = None

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        try:
            # loop through our set of deltas
            chain = None
            while 1:
                chunkdata = bundle.deltachunk(chain)
                if not chunkdata:
                    break
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']

                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue

                for p in (p1, p2):
                    if not p in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)
                chain = self._addrevision(node, None, transaction, link,
                                          p1, p2, (baserev, delta), ifh, dfh)
                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return node

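    # Illustrative note (not part of the original source): each
    # chunkdata dict yielded above carries 'node', 'p1', 'p2', 'cs'
    # (the linked changeset), 'deltabase' and 'delta'; feeding the last
    # added node back in as `chain` lets the bundle format default the
    # next chunk's delta base to it when none is given explicitly.
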
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it will re-add them after this truncation.
        """
        if len(self) == 0:
            return

        for rev in self:
            if self.index[rev][4] >= minlink:
                break
        else:
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

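    # Illustrative note (not part of the original source): if revs 0..9
    # have linkrevs 0..9 and minlink is 7, the loop above stops at rev 7
    # and everything from rev 7 onward is truncated from the index (and
    # from the data file for non-inline revlogs); the nodemap entries
    # for revs 7..9 are dropped as well.
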
    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res