revlog: support writing generaldelta revlogs...
Sune Foldager
r14270:d6907a56 default
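A minimal usage sketch (not part of the commit itself; the format.generaldelta option and the "generaldelta" requirements entry are taken from the diff below, and the hg invocations are the standard ones): creating a repository with the option enabled records "generaldelta" in .hg/requires, and _applyrequirements then sets the flag on the store opener so revlogs are written in generaldelta form:

    # hgrc: write generaldelta revlogs in newly created repositories
    [format]
    generaldelta = True

    # equivalently, per invocation:
    $ hg --config format.generaldelta=1 init repo
    $ grep generaldelta repo/.hg/requires
    generaldelta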
@@ -1,1957 +1,1961 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import repo, changegroup, subrepo, discovery, pushkey
 import changelog, dirstate, filelog, manifest, context, bookmarks
 import lock, transaction, store, encoding
 import scmutil, util, extensions, hook, error
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
 propertycache = util.propertycache
 
 class localrepository(repo.repository):
     capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                         'known', 'getbundle'))
-    supportedformats = set(('revlogv1',))
+    supportedformats = set(('revlogv1', 'generaldelta'))
     supported = supportedformats | set(('store', 'fncache', 'shared',
                                         'dotencode'))
 
     def __init__(self, baseui, path=None, create=0):
         repo.repository.__init__(self)
         self.root = os.path.realpath(util.expandpath(path))
         self.path = os.path.join(self.root, ".hg")
         self.origroot = path
         self.auditor = scmutil.pathauditor(self.root, self._checknested)
         self.opener = scmutil.opener(self.path)
         self.wopener = scmutil.opener(self.root)
         self.baseui = baseui
         self.ui = baseui.copy()
 
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass
 
         if not os.path.isdir(self.path):
             if create:
                 if not os.path.exists(path):
                     util.makedirs(path)
                 util.makedir(self.path, notindexed=True)
                 requirements = ["revlogv1"]
                 if self.ui.configbool('format', 'usestore', True):
                     os.mkdir(os.path.join(self.path, "store"))
                     requirements.append("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                         if self.ui.configbool('format', 'dotencode', True):
                             requirements.append('dotencode')
                     # create an invalid changelog
                     self.opener.append(
                         "00changelog.i",
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
+                if self.ui.configbool('format', 'generaldelta', False):
+                    requirements.append("generaldelta")
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             # find requirements
             requirements = set()
             try:
                 requirements = set(self.opener.read("requires").splitlines())
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
             for r in requirements - self.supported:
                 raise error.RequirementError(
                     _("requirement '%s' not supported") % r)
 
         self.sharedpath = self.path
         try:
             s = os.path.realpath(self.opener.read("sharedpath"))
             if not os.path.exists(s):
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise
 
         self.store = store.store(requirements, self.sharedpath, scmutil.opener)
         self.spath = self.store.path
         self.sopener = self.store.opener
         self.sjoin = self.store.join
         self.opener.createmode = self.store.createmode
         self._applyrequirements(requirements)
         if create:
             self._writerequirements()
 
         # These two define the set of tags for this repository. _tags
         # maps tag name to node; _tagtypes maps tag name to 'global' or
         # 'local'. (Global tags are defined by .hgtags across all
         # heads, and local tags are defined in .hg/localtags.) They
         # constitute the in-memory cache of tags.
         self._tags = None
         self._tagtypes = None
 
         self._branchcache = None
         self._branchcachetip = None
         self.nodetagscache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None
 
     def _applyrequirements(self, requirements):
         self.requirements = requirements
         self.sopener.options = {}
+        if 'generaldelta' in requirements:
+            self.sopener.options['generaldelta'] = 1
 
     def _writerequirements(self):
         reqfile = self.opener("requires", "w")
         for r in self.requirements:
             reqfile.write("%s\n" % r)
         reqfile.close()
 
     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
         subpath = path[len(self.root) + 1:]
 
         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
         #   $ hg cat -r 10 sub/x.txt
         #
         # if sub/ is no longer a subrepository in the working copy
         # parent revision.
         #
         # However, it can of course also allow things that would have
         # been rejected before, such as the above cat command if sub/
         # is a subrepository now, but was a normal directory before.
         # The old path auditor would have rejected by mistake since it
         # panics when it sees sub/.hg/.
         #
         # All in all, checking against the working copy seems sensible
         # since we want to prevent access to nested repositories on
         # the filesystem *now*.
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
             prefix = os.sep.join(parts)
             if prefix in ctx.substate:
                 if prefix == subpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
                     return sub.checknested(subpath[len(prefix) + 1:])
             else:
                 parts.pop()
         return False
 
     @util.propertycache
     def _bookmarks(self):
         return bookmarks.read(self)
 
     @util.propertycache
     def _bookmarkcurrent(self):
         return bookmarks.readcurrent(self)
 
     @propertycache
     def changelog(self):
         c = changelog.changelog(self.sopener)
         if 'HG_PENDING' in os.environ:
             p = os.environ['HG_PENDING']
             if p.startswith(self.root):
                 c.readpending('00changelog.i.a')
         self.sopener.options['defversion'] = c.version
         return c
 
     @propertycache
     def manifest(self):
         return manifest.manifest(self.sopener)
 
     @propertycache
     def dirstate(self):
         warned = [0]
         def validate(node):
             try:
                 self.changelog.rev(node)
                 return node
             except error.LookupError:
                 if not warned[0]:
                     warned[0] = True
                     self.ui.warn(_("warning: ignoring unknown"
                                    " working parent %s!\n") % short(node))
                 return nullid
 
         return dirstate.dirstate(self.opener, self.ui, self.root, validate)
 
     def __getitem__(self, changeid):
         if changeid is None:
             return context.workingctx(self)
         return context.changectx(self, changeid)
 
     def __contains__(self, changeid):
         try:
             return bool(self.lookup(changeid))
         except error.RepoLookupError:
             return False
 
     def __nonzero__(self):
         return True
 
     def __len__(self):
         return len(self.changelog)
 
     def __iter__(self):
         for i in xrange(len(self)):
             yield i
 
     def url(self):
         return 'file:' + self.root
 
     def hook(self, name, throw=False, **args):
         return hook.hook(self.ui, self, name, throw, **args)
 
     tag_disallowed = ':\r\n'
 
     def _tag(self, names, node, message, local, user, date, extra={}):
         if isinstance(names, str):
             allchars = names
             names = (names,)
         else:
             allchars = ''.join(names)
         for c in self.tag_disallowed:
             if c in allchars:
                 raise util.Abort(_('%r cannot be used in a tag name') % c)
 
         branches = self.branchmap()
         for name in names:
             self.hook('pretag', throw=True, node=hex(node), tag=name,
                       local=local)
             if name in branches:
                 self.ui.warn(_("warning: tag %s conflicts with existing"
                                " branch name\n") % name)
 
         def writetags(fp, names, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             for name in names:
                 m = munge and munge(name) or name
                 if self._tagtypes and name in self._tagtypes:
                     old = self._tags.get(name, nullid)
                     fp.write('%s %s\n' % (hex(old), m))
                 fp.write('%s %s\n' % (hex(node), m))
             fp.close()
 
         prevtags = ''
         if local:
             try:
                 fp = self.opener('localtags', 'r+')
             except IOError:
                 fp = self.opener('localtags', 'a')
             else:
                 prevtags = fp.read()
 
             # local tags are stored in the current charset
             writetags(fp, names, None, prevtags)
             for name in names:
                 self.hook('tag', node=hex(node), tag=name, local=local)
             return
 
         try:
             fp = self.wfile('.hgtags', 'rb+')
         except IOError:
             fp = self.wfile('.hgtags', 'ab')
         else:
             prevtags = fp.read()
 
         # committed tags are stored in UTF-8
         writetags(fp, names, encoding.fromlocal, prevtags)
 
         fp.close()
 
         if '.hgtags' not in self.dirstate:
             self[None].add(['.hgtags'])
 
         m = matchmod.exact(self.root, '', ['.hgtags'])
         tagnode = self.commit(message, user, date, extra=extra, match=m)
 
         for name in names:
             self.hook('tag', node=hex(node), tag=name, local=local)
 
         return tagnode
 
     def tag(self, names, node, message, local, user, date):
         '''tag a revision with one or more symbolic names.
 
         names is a list of strings or, when adding a single tag, names may be a
         string.
 
         if local is True, the tags are stored in a per-repository file.
         otherwise, they are stored in the .hgtags file, and a new
         changeset is committed with the change.
 
         keyword arguments:
 
         local: whether to store tags in non-version-controlled file
         (default False)
 
         message: commit message to use if committing
 
         user: name of user to use if committing
 
         date: date tuple to use if committing'''
 
         if not local:
             for x in self.status()[:5]:
                 if '.hgtags' in x:
                     raise util.Abort(_('working copy of .hgtags is changed '
                                        '(please commit .hgtags manually)'))
 
         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date)
 
     def tags(self):
         '''return a mapping of tag to node'''
         if self._tags is None:
             (self._tags, self._tagtypes) = self._findtags()
 
         return self._tags
 
     def _findtags(self):
         '''Do the hard work of finding tags. Return a pair of dicts
         (tags, tagtypes) where tags maps tag name to node, and tagtypes
         maps tag name to a string like \'global\' or \'local\'.
         Subclasses or extensions are free to add their own tags, but
         should be aware that the returned dicts will be retained for the
         duration of the localrepo object.'''
 
         # XXX what tagtype should subclasses/extensions use? Currently
         # mq and bookmarks add tags, but do not set the tagtype at all.
         # Should each extension invent its own tag type? Should there
         # be one tagtype for all such "virtual" tags? Or is the status
         # quo fine?
 
         alltags = {} # map tag name to (node, hist)
         tagtypes = {}
 
         tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
 
         # Build the return dicts. Have to re-encode tag names because
         # the tags module always uses UTF-8 (in order not to lose info
         # writing to the cache), but the rest of Mercurial wants them in
         # local encoding.
         tags = {}
         for (name, (node, hist)) in alltags.iteritems():
             if node != nullid:
                 try:
                     # ignore tags to unknown nodes
                     self.changelog.lookup(node)
                     tags[encoding.tolocal(name)] = node
                 except error.LookupError:
                     pass
         tags['tip'] = self.changelog.tip()
         tagtypes = dict([(encoding.tolocal(name), value)
                          for (name, value) in tagtypes.iteritems()])
         return (tags, tagtypes)
 
     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:
 
         'local'  : a local tag
         'global' : a global tag
         None     : tag does not exist
         '''
 
         self.tags()
 
         return self._tagtypes.get(tagname)
 
     def tagslist(self):
         '''return a list of tags ordered by revision'''
         l = []
         for t, n in self.tags().iteritems():
             r = self.changelog.rev(n)
             l.append((r, t, n))
         return [(t, n) for r, t, n in sorted(l)]
 
     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self.nodetagscache:
             self.nodetagscache = {}
             for t, n in self.tags().iteritems():
                 self.nodetagscache.setdefault(n, []).append(t)
             for tags in self.nodetagscache.itervalues():
                 tags.sort()
         return self.nodetagscache.get(node, [])
 
     def nodebookmarks(self, node):
         marks = []
         for bookmark, n in self._bookmarks.iteritems():
             if n == node:
                 marks.append(bookmark)
         return sorted(marks)
 
     def _branchtags(self, partial, lrev):
         # TODO: rename this function?
         tiprev = len(self) - 1
         if lrev != tiprev:
             ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
             self._updatebranchcache(partial, ctxgen)
             self._writebranchcache(partial, self.changelog.tip(), tiprev)
 
         return partial
 
     def updatebranchcache(self):
         tip = self.changelog.tip()
         if self._branchcache is not None and self._branchcachetip == tip:
             return self._branchcache
 
         oldtip = self._branchcachetip
         self._branchcachetip = tip
         if oldtip is None or oldtip not in self.changelog.nodemap:
             partial, last, lrev = self._readbranchcache()
         else:
             lrev = self.changelog.rev(oldtip)
             partial = self._branchcache
 
         self._branchtags(partial, lrev)
         # this private cache holds all heads (not just tips)
         self._branchcache = partial
 
     def branchmap(self):
         '''returns a dictionary {branch: [branchheads]}'''
         self.updatebranchcache()
         return self._branchcache
 
     def branchtags(self):
         '''return a dict where branch names map to the tipmost head of
         the branch, open heads come before closed'''
         bt = {}
         for bn, heads in self.branchmap().iteritems():
             tip = heads[-1]
             for h in reversed(heads):
                 if 'close' not in self.changelog.read(h)[5]:
                     tip = h
                     break
             bt[bn] = tip
         return bt
 
     def _readbranchcache(self):
         partial = {}
         try:
             f = self.opener("cache/branchheads")
             lines = f.read().split('\n')
             f.close()
         except (IOError, OSError):
             return {}, nullid, nullrev
 
         try:
             last, lrev = lines.pop(0).split(" ", 1)
             last, lrev = bin(last), int(lrev)
             if lrev >= len(self) or self[lrev].node() != last:
                 # invalidate the cache
                 raise ValueError('invalidating branch cache (tip differs)')
             for l in lines:
                 if not l:
                     continue
                 node, label = l.split(" ", 1)
                 label = encoding.tolocal(label.strip())
                 partial.setdefault(label, []).append(bin(node))
         except KeyboardInterrupt:
             raise
         except Exception, inst:
             if self.ui.debugflag:
                 self.ui.warn(str(inst), '\n')
             partial, last, lrev = {}, nullid, nullrev
         return partial, last, lrev
 
     def _writebranchcache(self, branches, tip, tiprev):
         try:
             f = self.opener("cache/branchheads", "w", atomictemp=True)
             f.write("%s %s\n" % (hex(tip), tiprev))
             for label, nodes in branches.iteritems():
                 for node in nodes:
                     f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
             f.rename()
         except (IOError, OSError):
             pass
 
     def _updatebranchcache(self, partial, ctxgen):
         # collect new branch entries
         newbranches = {}
         for c in ctxgen:
             newbranches.setdefault(c.branch(), []).append(c.node())
         # if older branchheads are reachable from new ones, they aren't
         # really branchheads. Note checking parents is insufficient:
         # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
         for branch, newnodes in newbranches.iteritems():
             bheads = partial.setdefault(branch, [])
             bheads.extend(newnodes)
             if len(bheads) <= 1:
                 continue
             bheads = sorted(bheads, key=lambda x: self[x].rev())
             # starting from tip means fewer passes over reachable
             while newnodes:
                 latest = newnodes.pop()
                 if latest not in bheads:
                     continue
                 minbhrev = self[bheads[0]].node()
                 reachable = self.changelog.reachable(latest, minbhrev)
                 reachable.remove(latest)
                 if reachable:
                     bheads = [b for b in bheads if b not in reachable]
             partial[branch] = bheads
 
     def lookup(self, key):
         if isinstance(key, int):
             return self.changelog.node(key)
         elif key == '.':
             return self.dirstate.p1()
         elif key == 'null':
             return nullid
         elif key == 'tip':
             return self.changelog.tip()
         n = self.changelog._match(key)
         if n:
             return n
         if key in self._bookmarks:
             return self._bookmarks[key]
         if key in self.tags():
             return self.tags()[key]
         if key in self.branchtags():
             return self.branchtags()[key]
         n = self.changelog._partialmatch(key)
         if n:
             return n
 
         # can't find key, check if it might have come from damaged dirstate
         if key in self.dirstate.parents():
             raise error.Abort(_("working directory has unknown parent '%s'!")
                               % short(key))
         try:
             if len(key) == 20:
                 key = hex(key)
         except TypeError:
             pass
         raise error.RepoLookupError(_("unknown revision '%s'") % key)
 
     def lookupbranch(self, key, remote=None):
         repo = remote or self
         if key in repo.branchmap():
             return key
 
         repo = (remote and remote.local()) and remote or self
         return repo[key].branch()
 
     def known(self, nodes):
         nm = self.changelog.nodemap
         return [(n in nm) for n in nodes]
 
     def local(self):
         return True
 
     def join(self, f):
         return os.path.join(self.path, f)
 
     def wjoin(self, f):
         return os.path.join(self.root, f)
 
     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         return filelog.filelog(self.sopener, f)
 
     def changectx(self, changeid):
         return self[changeid]
 
     def parents(self, changeid=None):
         '''get list of changectxs for parents of changeid'''
         return self[changeid].parents()
 
     def filectx(self, path, changeid=None, fileid=None):
         """changeid can be a changeset revision, node, or tag.
            fileid can be a file revision or node."""
         return context.filectx(self, path, changeid, fileid)
 
     def getcwd(self):
         return self.dirstate.getcwd()
 
     def pathto(self, f, cwd=None):
         return self.dirstate.pathto(f, cwd)
 
     def wfile(self, f, mode='r'):
         return self.wopener(f, mode)
 
     def _link(self, f):
         return os.path.islink(self.wjoin(f))
 
     def _loadfilter(self, filter):
         if filter not in self.filterpats:
             l = []
             for pat, cmd in self.ui.configitems(filter):
                 if cmd == '!':
                     continue
                 mf = matchmod.match(self.root, '', [pat])
                 fn = None
                 params = cmd
                 for name, filterfn in self._datafilters.iteritems():
                     if cmd.startswith(name):
                         fn = filterfn
                         params = cmd[len(name):].lstrip()
                         break
                 if not fn:
                     fn = lambda s, c, **kwargs: util.filter(s, c)
                 # Wrap old filters not supporting keyword arguments
                 if not inspect.getargspec(fn)[2]:
                     oldfn = fn
                     fn = lambda s, c, **kwargs: oldfn(s, c)
                 l.append((mf, fn, params))
             self.filterpats[filter] = l
         return self.filterpats[filter]
 
     def _filter(self, filterpats, filename, data):
         for mf, fn, cmd in filterpats:
             if mf(filename):
                 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                 break
 
         return data
 
     @propertycache
     def _encodefilterpats(self):
         return self._loadfilter('encode')
 
     @propertycache
     def _decodefilterpats(self):
         return self._loadfilter('decode')
 
     def adddatafilter(self, name, filter):
         self._datafilters[name] = filter
 
     def wread(self, filename):
         if self._link(filename):
             data = os.readlink(self.wjoin(filename))
         else:
             data = self.wopener.read(filename)
         return self._filter(self._encodefilterpats, filename, data)
 
     def wwrite(self, filename, data, flags):
         data = self._filter(self._decodefilterpats, filename, data)
         if 'l' in flags:
             self.wopener.symlink(data, filename)
         else:
             self.wopener.write(filename, data)
             if 'x' in flags:
                 util.setflags(self.wjoin(filename), False, True)
 
     def wwritedata(self, filename, data):
         return self._filter(self._decodefilterpats, filename, data)
 
     def transaction(self, desc):
         tr = self._transref and self._transref() or None
         if tr and tr.running():
             return tr.nest()
 
         # abort here if the journal already exists
         if os.path.exists(self.sjoin("journal")):
             raise error.RepoError(
                 _("abandoned transaction found - run hg recover"))
 
         # save dirstate for rollback
         try:
             ds = self.opener.read("dirstate")
         except IOError:
             ds = ""
         self.opener.write("journal.dirstate", ds)
         self.opener.write("journal.branch",
                           encoding.fromlocal(self.dirstate.branch()))
         self.opener.write("journal.desc",
                           "%d\n%s\n" % (len(self), desc))
 
         renames = [(self.sjoin("journal"), self.sjoin("undo")),
                    (self.join("journal.dirstate"), self.join("undo.dirstate")),
                    (self.join("journal.branch"), self.join("undo.branch")),
                    (self.join("journal.desc"), self.join("undo.desc"))]
         tr = transaction.transaction(self.ui.warn, self.sopener,
                                      self.sjoin("journal"),
                                      aftertrans(renames),
                                      self.store.createmode)
         self._transref = weakref.ref(tr)
         return tr
 
     def recover(self):
         lock = self.lock()
         try:
             if os.path.exists(self.sjoin("journal")):
                 self.ui.status(_("rolling back interrupted transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("journal"),
                                      self.ui.warn)
                 self.invalidate()
                 return True
             else:
                 self.ui.warn(_("no interrupted transaction available\n"))
                 return False
         finally:
             lock.release()
 
     def rollback(self, dryrun=False):
         wlock = lock = None
         try:
             wlock = self.wlock()
             lock = self.lock()
             if os.path.exists(self.sjoin("undo")):
                 try:
                     args = self.opener.read("undo.desc").splitlines()
                     if len(args) >= 3 and self.ui.verbose:
                         desc = _("repository tip rolled back to revision %s"
                                  " (undo %s: %s)\n") % (
                                  int(args[0]) - 1, args[1], args[2])
                     elif len(args) >= 2:
                         desc = _("repository tip rolled back to revision %s"
                                  " (undo %s)\n") % (
                                  int(args[0]) - 1, args[1])
                 except IOError:
                     desc = _("rolling back unknown transaction\n")
                 self.ui.status(desc)
                 if dryrun:
                     return
                 transaction.rollback(self.sopener, self.sjoin("undo"),
                                      self.ui.warn)
                 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                 if os.path.exists(self.join('undo.bookmarks')):
                     util.rename(self.join('undo.bookmarks'),
                                 self.join('bookmarks'))
                 try:
                     branch = self.opener.read("undo.branch")
                     self.dirstate.setbranch(branch)
                 except IOError:
                     self.ui.warn(_("named branch could not be reset, "
                                    "current branch is still: %s\n")
                                  % self.dirstate.branch())
                 self.invalidate()
                 self.dirstate.invalidate()
                 self.destroyed()
                 parents = tuple([p.rev() for p in self.parents()])
                 if len(parents) > 1:
                     self.ui.status(_("working directory now based on "
                                      "revisions %d and %d\n") % parents)
                 else:
                     self.ui.status(_("working directory now based on "
                                      "revision %d\n") % parents)
             else:
                 self.ui.warn(_("no rollback information available\n"))
                 return 1
         finally:
             release(lock, wlock)
 
     def invalidatecaches(self):
         self._tags = None
         self._tagtypes = None
         self.nodetagscache = None
         self._branchcache = None # in UTF-8
         self._branchcachetip = None
 
     def invalidate(self):
         for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
             if a in self.__dict__:
                 delattr(self, a)
         self.invalidatecaches()
 
     def _lock(self, lockname, wait, releasefn, acquirefn, desc):
         try:
             l = lock.lock(lockname, 0, releasefn, desc=desc)
         except error.LockHeld, inst:
             if not wait:
                 raise
             self.ui.warn(_("waiting for lock on %s held by %r\n") %
                          (desc, inst.locker))
             # default to 600 seconds timeout
             l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                           releasefn, desc=desc)
         if acquirefn:
             acquirefn()
         return l
 
     def lock(self, wait=True):
         '''Lock the repository store (.hg/store) and return a weak reference
         to the lock. Use this before modifying the store (e.g. committing or
         stripping). If you are opening a transaction, get a lock as well.)'''
         l = self._lockref and self._lockref()
         if l is not None and l.held:
             l.lock()
             return l
 
         l = self._lock(self.sjoin("lock"), wait, self.store.write,
                        self.invalidate, _('repository %s') % self.origroot)
         self._lockref = weakref.ref(l)
         return l
 
     def wlock(self, wait=True):
         '''Lock the non-store parts of the repository (everything under
         .hg except .hg/store) and return a weak reference to the lock.
         Use this before modifying files in .hg.'''
         l = self._wlockref and self._wlockref()
         if l is not None and l.held:
             l.lock()
             return l
 
         l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                        self.dirstate.invalidate, _('working directory of %s') %
                        self.origroot)
         self._wlockref = weakref.ref(l)
         return l
 
     def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
         """
         commit an individual file as part of a larger transaction
         """
 
         fname = fctx.path()
         text = fctx.data()
         flog = self.file(fname)
         fparent1 = manifest1.get(fname, nullid)
         fparent2 = fparent2o = manifest2.get(fname, nullid)
 
         meta = {}
         copy = fctx.renamed()
         if copy and copy[0] != fname:
             # Mark the new revision of this file as a copy of another
             # file. This copy data will effectively act as a parent
             # of this new revision. If this is a merge, the first
             # parent will be the nullid (meaning "look up the copy data")
             # and the second one will be the other parent. For example:
             #
             # 0 --- 1 --- 3   rev1 changes file foo
             #   \       /     rev2 renames foo to bar and changes it
             #    \- 2 -/      rev3 should have bar with all changes and
             #                      should record that bar descends from
             #                      bar in rev2 and foo in rev1
             #
             # this allows this merge to succeed:
             #
             # 0 --- 1 --- 3   rev4 reverts the content change from rev2
             #   \       /     merging rev3 and rev4 should use bar@rev2
             #    \- 2 --- 4        as the merge base
             #
 
             cfname = copy[0]
             crev = manifest1.get(cfname)
             newfparent = fparent2
 
             if manifest2: # branch merge
                 if fparent2 == nullid or crev is None: # copied on remote side
                     if cfname in manifest2:
                         crev = manifest2[cfname]
                         newfparent = fparent1
 
             # find source in nearest ancestor if we've lost track
             if not crev:
                 self.ui.debug(" %s: searching for copy revision for %s\n" %
                               (fname, cfname))
                 for ancestor in self[None].ancestors():
                     if cfname in ancestor:
                         crev = ancestor[cfname].filenode()
                         break
 
             if crev:
                 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                 meta["copy"] = cfname
                 meta["copyrev"] = hex(crev)
                 fparent1, fparent2 = nullid, newfparent
             else:
                 self.ui.warn(_("warning: can't find ancestor for '%s' "
                                "copied from '%s'!\n") % (fname, cfname))
 
         elif fparent2 != nullid:
             # is one parent an ancestor of the other?
             fparentancestor = flog.ancestor(fparent1, fparent2)
             if fparentancestor == fparent1:
                 fparent1, fparent2 = fparent2, nullid
             elif fparentancestor == fparent2:
                 fparent2 = nullid
 
         # is the file changed?
         if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
             changelist.append(fname)
             return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
 
         # are just the flags changed during merge?
         if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
             changelist.append(fname)
 
         return fparent1
 
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

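    # commit() above gathers state from the working directory into a
    # workingctx and then delegates the actual write to commitctx() below,
    # which operates purely on the context object it is given.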
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

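    # status() below returns a 7-tuple of sorted filename lists, in this
    # order: modified, added, removed, deleted, unknown, ignored, clean.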
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

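    # Both heads() and branchheads() below return heads ordered from
    # newest to oldest (descending revision order).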
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

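    # between() samples the first-parent chain from each 'top' node down
    # toward 'bottom', keeping nodes at exponentially growing distances
    # (1, 2, 4, 8, ...) so the returned lists stay logarithmic in chain
    # length; presumably this backs the legacy 'between' wire command used
    # by the older discovery algorithm.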
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

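    # pull() runs in three steps: discovery (findcommonincoming), fetching
    # a changegroup via the best capability the remote offers (getbundle,
    # changegroup or changegroupsubset), and finally addchangegroup().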
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

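    # After the changegroup has been transferred, push() also advances any
    # remote bookmarks whose remote position is an ancestor of the local
    # one, via the 'bookmarks' pushkey namespace.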
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

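    # Unlike changegroupsubset(), getbundle() computes the outgoing set
    # with findcommonmissing() on the changelog; since callers may pass
    # 'common' nodes the local repo has never seen, those are filtered
    # out first.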
    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

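    # Both bundle generators below emit chunks in the same order: the
    # changelog group first, then the manifest group, then one group per
    # changed file. The lookup() callback maps each node being bundled
    # back to its owning changeset node.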
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            for n in missing:
                if revlog.linkrev(revlog.rev(n)) not in commonrevs:
                    yield n

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})
                first = True

                for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
                                              bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

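    # _changegroup() is the fast path taken by _changegroupsubset() when
    # the requested heads are exactly the repository heads: every node
    # whose linkrev falls in the outgoing set can be sent without the
    # per-file pruning done on the slow path.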
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                first = True
                for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

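    # addchangegroup() buffers changelog writes (delayupdate/finalize) so
    # concurrent readers never observe a partially applied group; hooks
    # that need to see the incoming data early get it via the 'pending'
    # callback.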
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while 1:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

1840 def stream_in(self, remote, requirements):
1844 def stream_in(self, remote, requirements):
1841 lock = self.lock()
1845 lock = self.lock()
1842 try:
1846 try:
1843 fp = remote.stream_out()
1847 fp = remote.stream_out()
1844 l = fp.readline()
1848 l = fp.readline()
1845 try:
1849 try:
1846 resp = int(l)
1850 resp = int(l)
1847 except ValueError:
1851 except ValueError:
1848 raise error.ResponseError(
1852 raise error.ResponseError(
1849 _('Unexpected response from remote server:'), l)
1853 _('Unexpected response from remote server:'), l)
1850 if resp == 1:
1854 if resp == 1:
1851 raise util.Abort(_('operation forbidden by server'))
1855 raise util.Abort(_('operation forbidden by server'))
1852 elif resp == 2:
1856 elif resp == 2:
1853 raise util.Abort(_('locking the remote repository failed'))
1857 raise util.Abort(_('locking the remote repository failed'))
1854 elif resp != 0:
1858 elif resp != 0:
1855 raise util.Abort(_('the server sent an unknown error code'))
1859 raise util.Abort(_('the server sent an unknown error code'))
1856 self.ui.status(_('streaming all changes\n'))
1860 self.ui.status(_('streaming all changes\n'))
1857 l = fp.readline()
1861 l = fp.readline()
1858 try:
1862 try:
1859 total_files, total_bytes = map(int, l.split(' ', 1))
1863 total_files, total_bytes = map(int, l.split(' ', 1))
1860 except (ValueError, TypeError):
1864 except (ValueError, TypeError):
1861 raise error.ResponseError(
1865 raise error.ResponseError(
1862 _('Unexpected response from remote server:'), l)
1866 _('Unexpected response from remote server:'), l)
1863 self.ui.status(_('%d files to transfer, %s of data\n') %
1867 self.ui.status(_('%d files to transfer, %s of data\n') %
1864 (total_files, util.bytecount(total_bytes)))
1868 (total_files, util.bytecount(total_bytes)))
1865 start = time.time()
1869 start = time.time()
1866 for i in xrange(total_files):
1870 for i in xrange(total_files):
1867 # XXX doesn't support '\n' or '\r' in filenames
1871 # XXX doesn't support '\n' or '\r' in filenames
1868 l = fp.readline()
1872 l = fp.readline()
1869 try:
1873 try:
1870 name, size = l.split('\0', 1)
1874 name, size = l.split('\0', 1)
1871 size = int(size)
1875 size = int(size)
1872 except (ValueError, TypeError):
1876 except (ValueError, TypeError):
1873 raise error.ResponseError(
1877 raise error.ResponseError(
1874 _('Unexpected response from remote server:'), l)
1878 _('Unexpected response from remote server:'), l)
1875 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1879 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1876 # for backwards compat, name was partially encoded
1880 # for backwards compat, name was partially encoded
1877 ofp = self.sopener(store.decodedir(name), 'w')
1881 ofp = self.sopener(store.decodedir(name), 'w')
1878 for chunk in util.filechunkiter(fp, limit=size):
1882 for chunk in util.filechunkiter(fp, limit=size):
1879 ofp.write(chunk)
1883 ofp.write(chunk)
1880 ofp.close()
1884 ofp.close()
1881 elapsed = time.time() - start
1885 elapsed = time.time() - start
1882 if elapsed <= 0:
1886 if elapsed <= 0:
1883 elapsed = 0.001
1887 elapsed = 0.001
1884 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1888 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1885 (util.bytecount(total_bytes), elapsed,
1889 (util.bytecount(total_bytes), elapsed,
1886 util.bytecount(total_bytes / elapsed)))
1890 util.bytecount(total_bytes / elapsed)))
1887
1891
1888 # new requirements = old non-format requirements + new format-related
1892 # new requirements = old non-format requirements + new format-related
1889 # requirements from the streamed-in repository
1893 # requirements from the streamed-in repository
1890 requirements.update(set(self.requirements) - self.supportedformats)
1894 requirements.update(set(self.requirements) - self.supportedformats)
1891 self._applyrequirements(requirements)
1895 self._applyrequirements(requirements)
1892 self._writerequirements()
1896 self._writerequirements()
1893
1897
1894 self.invalidate()
1898 self.invalidate()
1895 return len(self.heads()) + 1
1899 return len(self.heads()) + 1
1896 finally:
1900 finally:
1897 lock.release()
1901 lock.release()
1898
1902
1899 def clone(self, remote, heads=[], stream=False):
1903 def clone(self, remote, heads=[], stream=False):
1900 '''clone remote repository.
1904 '''clone remote repository.
1901
1905
1902 keyword arguments:
1906 keyword arguments:
1903 heads: list of revs to clone (forces use of pull)
1907 heads: list of revs to clone (forces use of pull)
1904 stream: use streaming clone if possible'''
1908 stream: use streaming clone if possible'''
1905
1909
1906 # now, all clients that can request uncompressed clones can
1910 # now, all clients that can request uncompressed clones can
1907 # read repo formats supported by all servers that can serve
1911 # read repo formats supported by all servers that can serve
1908 # them.
1912 # them.
1909
1913
1910 # if revlog format changes, client will have to check version
1914 # if revlog format changes, client will have to check version
1911 # and format flags on "stream" capability, and use
1915 # and format flags on "stream" capability, and use
1912 # uncompressed only if compatible.
1916 # uncompressed only if compatible.
1913
1917
1914 if stream and not heads:
1918 if stream and not heads:
1915 # 'stream' means remote revlog format is revlogv1 only
1919 # 'stream' means remote revlog format is revlogv1 only
1916 if remote.capable('stream'):
1920 if remote.capable('stream'):
1917 return self.stream_in(remote, set(('revlogv1',)))
1921 return self.stream_in(remote, set(('revlogv1',)))
1918 # otherwise, 'streamreqs' contains the remote revlog format
1922 # otherwise, 'streamreqs' contains the remote revlog format
1919 streamreqs = remote.capable('streamreqs')
1923 streamreqs = remote.capable('streamreqs')
1920 if streamreqs:
1924 if streamreqs:
1921 streamreqs = set(streamreqs.split(','))
1925 streamreqs = set(streamreqs.split(','))
1922 # if we support it, stream in and adjust our requirements
1926 # if we support it, stream in and adjust our requirements
1923 if not streamreqs - self.supportedformats:
1927 if not streamreqs - self.supportedformats:
1924 return self.stream_in(remote, streamreqs)
1928 return self.stream_in(remote, streamreqs)
1925 return self.pull(remote, heads)
1929 return self.pull(remote, heads)
1926
1930
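    # Editor's sketch (not part of the original file): how the
    # 'streamreqs' negotiation above plays out. The server advertises
    # its format requirements as a comma-separated list, and we stream
    # only when we understand every one of them:
    #
    #   streamreqs = set('revlogv1,generaldelta'.split(','))
    #   streamreqs - self.supportedformats == set()  ->  stream_in()
    #
    # A client that does not know 'generaldelta' is left with a
    # non-empty difference and falls back to a regular pull().
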
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

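# Editor's sketch (not part of the original module): how aftertrans is
# meant to be used. The callback captures the rename list eagerly, so a
# transaction can run it later without holding references back into the
# repository (which is the circular-reference problem noted above). The
# file names here are made up for illustration.
def _aftertrans_example():
    import os, tempfile
    d = tempfile.mkdtemp()
    src = os.path.join(d, 'journal')
    dest = os.path.join(d, 'undo')
    open(src, 'w').close()
    cb = aftertrans([(src, dest)])  # nothing is renamed yet
    cb()                            # now 'journal' becomes 'undo'
    assert os.path.exists(dest) and not os.path.exists(src)
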
def instance(ui, path, create):
    return localrepository(ui, util.localpath(path), create)

def islocal(path):
    return True
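
# Editor's sketch (not part of the original module): the wire format
# that stream_in() above consumes, exercised against an in-memory
# sample. The stream is a status line ('0' on success), a
# '<files> <bytes>' line, then for each file a '<name>\0<size>' header
# followed by exactly that many raw bytes. The file name and content
# below are invented for the example.
def _streamformat_example():
    from cStringIO import StringIO
    fp = StringIO('0\n' + '1 5\n' + 'data/foo.i\0' + '5\n' + 'hello')
    assert int(fp.readline()) == 0                  # status: OK
    nfiles, total_bytes = map(int, fp.readline().split(' ', 1))
    for _i in xrange(nfiles):
        name, size = fp.readline().split('\0', 1)   # header line
        data = fp.read(int(size))                   # raw file payload
    return name, data                               # ('data/foo.i', 'hello')
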
@@ -1,1249 +1,1259 @@
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

# import stuff from node for others to import from revlog
from node import bin, hex, nullid, nullrev, short #@UnusedImport
from i18n import _
import ancestor, mdiff, parsers, error, util
import struct, zlib, errno

_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1

# revlog header flags
REVLOGV0 = 0
REVLOGNG = 1
REVLOGNGINLINEDATA = (1 << 16)
REVLOGGENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA

# revlog index flags
REVIDX_KNOWN_FLAGS = 0

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

RevlogError = error.RevlogError
LookupError = error.LookupError

def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    return long(long(offset) << 16 | type)

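# Editor's sketch (not part of the original module): the index packs the
# 48-bit data offset and the 16-bit flags into one 64-bit field; the
# three helpers above pack and unpack it losslessly.
def _offsettype_example():
    q = offset_type(1048576, 1)       # pack: offset << 16 | flags
    assert getoffset(q) == 1048576    # recover the data offset
    assert gettype(q) == 1            # recover the flag bits
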
nullhash = _sha(nullid)

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent nodes is null, p2 is null
    if p2 == nullid:
        # deep copy of a hash is faster than creating one
        s = nullhash.copy()
        s.update(p1)
    else:
        # none of the parent nodes are nullid
        l = [p1, p2]
        l.sort()
        s = _sha(l[0])
        s.update(l[1])
    s.update(text)
    return s.digest()

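# Editor's sketch (not part of the original module): for a root revision
# both parents are nullid, so hash() degenerates to
# sha1(nullid + nullid + text); sorting the parents first makes the
# result independent of parent order for merges.
def _hash_example(text="some file content"):
    n = hash(text, nullid, nullid)        # root revision: p2 == nullid
    assert n == _sha(nullid + nullid + text).digest()
    assert hash(text, n, nullid) != n     # same text, different history
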
def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text:
        return ("", text)
    l = len(text)
    bin = None
    if l < 44:
        pass
    elif l > 1000000:
        # zlib makes an internal copy, thus doubling memory usage for
        # large files, so let's do this in pieces
        z = zlib.compressobj()
        p = []
        pos = 0
        while pos < l:
            pos2 = pos + 2**20
            p.append(z.compress(text[pos:pos2]))
            pos = pos2
        p.append(z.flush())
        if sum(map(len, p)) < l:
            bin = "".join(p)
    else:
        bin = _compress(text)
    if bin is None or len(bin) > l:
        if text[0] == '\0':
            return ("", text)
        return ('u', text)
    return ("", bin)

def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    t = bin[0]
    if t == '\0':
        return bin
    if t == 'x':
        return _decompress(bin)
    if t == 'u':
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % t)

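# Editor's sketch (not part of the original module): compress() returns
# a (header, data) pair. A 'u' header marks text stored uncompressed
# because zlib would not shrink it; an empty header means the data is
# self-describing (zlib streams start with 'x', and text starting with
# '\0' is stored as-is).
def _compress_example():
    big = "x" * 1000                          # compressible: zlib wins
    header, data = compress(big)
    assert header == "" and data[0] == 'x'    # zlib stream marker
    assert decompress(header + data) == big
    short = "tiny"                            # under 44 bytes: kept as-is
    assert compress(short) == ('u', short)
    assert decompress('u' + short) == short
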
indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56

class revlogoldio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise RevlogError(_("index entry flags need RevlogNG"))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)

# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32
versionformat = ">I"

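# Editor's sketch (not part of the original module): each RevlogNG index
# entry is a fixed 64-byte record, and the 4-byte version header overlays
# the first bytes of entry 0 -- which is why packentry() below splices
# versionformat in for rev == 0.
def _indexsize_example():
    assert struct.calcsize(indexformatng) == 64   # Q + 6*i + 20s + 12x
    assert struct.calcsize(versionformat) == 4    # ">I", big-endian uint32
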
class revlogio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, None, cache

    def packentry(self, entry, node, version, rev):
        p = _pack(indexformatng, *entry)
        if rev == 0:
            p = _pack(versionformat, version) + p[4:]
        return p

class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        self._cache = None
        self._basecache = None
        self._chunkcache = (0, '')
        self.index = []
        self._pcache = {}
        self._nodecache = {nullid: nullrev}
        self._nodepos = None

        v = REVLOG_DEFAULT_VERSION
-        if hasattr(opener, 'options') and 'defversion' in opener.options:
-            v = opener.options['defversion']
-            if v & REVLOGNG:
-                v |= REVLOGNGINLINEDATA
+        if hasattr(opener, 'options'):
+            if 'defversion' in opener.options:
+                v = opener.options['defversion']
+                if v & REVLOGNG:
+                    v |= REVLOGNGINLINEDATA
+            if v & REVLOGNG and 'generaldelta' in opener.options:
+                v |= REVLOGGENERALDELTA

        i = ''
        try:
            f = self.opener(self.indexfile)
            i = f.read()
            f.close()
            if len(i) > 0:
                v = struct.unpack(versionformat, i[:4])[0]
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        self._generaldelta = v & REVLOGGENERALDELTA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(i, self._inline)
        except (ValueError, IndexError):
            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()

    def tip(self):
        return self.node(len(self.index) - 2)
    def __len__(self):
        return len(self.index) - 1
    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    @util.propertycache
    def nodemap(self):
        self.rev(self.node(0))
        return self._nodecache

    def rev(self, node):
        try:
            return self._nodecache[node]
        except KeyError:
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 2
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            raise LookupError(node, self.indexfile, _('no node'))

    def node(self, rev):
        return self.index[rev][7]
    def linkrev(self, rev):
        return self.index[rev][4]
    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
    def parentrevs(self, rev):
        return self.index[rev][5:7]
    def start(self, rev):
        return int(self.index[rev][0] >> 16)
    def end(self, rev):
        return self.start(rev) + self.length(rev)
    def length(self, rev):
        return self.index[rev][1]
    def chainbase(self, rev):
        index = self.index
        base = index[rev][3]
        while base != rev:
            rev = base
            base = index[rev][3]
        return base
    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF
    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(self.node(rev))
        return len(t)
    size = rawsize

    def reachable(self, node, stop=None):
        """return the set of all nodes ancestral to a given node, including
        the node itself, stopping when stop is matched"""
        reachable = set((node,))
        visit = [node]
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable.add(p)
                    visit.append(p)
        return reachable

    def ancestors(self, *revs):
        """Generate the ancestors of 'revs' in reverse topological order.

        Yield a sequence of revision numbers starting with the parents
        of each revision in revs, i.e., each revision is *not* considered
        an ancestor of itself. Results are in breadth-first order:
        parents of each rev in revs, then parents of those, etc. Result
        does not include the null revision."""
        visit = list(revs)
        seen = set([nullrev])
        while visit:
            for parent in self.parentrevs(visit.pop(0)):
                if parent not in seen:
                    visit.append(parent)
                    seen.add(parent)
                    yield parent

    def descendants(self, *revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in xrange(first + 1, len(self)):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common.

        More specifically, the second element is a list of nodes N such that
        every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        has = set(self.ancestors(*common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = [r for r in heads if r not in has]
        while visit:
            r = visit.pop(0)
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(r) for r in missing]

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        _common, missing = self.findcommonmissing(common, heads)
        return missing

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendents = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == nullrev:  # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents. (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents.add(n)
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

617 def headrevs(self):
620 def headrevs(self):
618 count = len(self)
621 count = len(self)
619 if not count:
622 if not count:
620 return [nullrev]
623 return [nullrev]
621 ishead = [1] * (count + 1)
624 ishead = [1] * (count + 1)
622 index = self.index
625 index = self.index
623 for r in xrange(count):
626 for r in xrange(count):
624 e = index[r]
627 e = index[r]
625 ishead[e[5]] = ishead[e[6]] = 0
628 ishead[e[5]] = ishead[e[6]] = 0
626 return [r for r in xrange(count) if ishead[r]]
629 return [r for r in xrange(count) if ishead[r]]
627
630
628 def heads(self, start=None, stop=None):
631 def heads(self, start=None, stop=None):
629 """return the list of all nodes that have no children
632 """return the list of all nodes that have no children
630
633
631 if start is specified, only heads that are descendants of
634 if start is specified, only heads that are descendants of
632 start will be returned
635 start will be returned
633 if stop is specified, it will consider all the revs from stop
636 if stop is specified, it will consider all the revs from stop
634 as if they had no children
637 as if they had no children
635 """
638 """
636 if start is None and stop is None:
639 if start is None and stop is None:
637 if not len(self):
640 if not len(self):
638 return [nullid]
641 return [nullid]
639 return [self.node(r) for r in self.headrevs()]
642 return [self.node(r) for r in self.headrevs()]
640
643
641 if start is None:
644 if start is None:
642 start = nullid
645 start = nullid
643 if stop is None:
646 if stop is None:
644 stop = []
647 stop = []
645 stoprevs = set([self.rev(n) for n in stop])
648 stoprevs = set([self.rev(n) for n in stop])
646 startrev = self.rev(start)
649 startrev = self.rev(start)
647 reachable = set((startrev,))
650 reachable = set((startrev,))
648 heads = set((startrev,))
651 heads = set((startrev,))
649
652
650 parentrevs = self.parentrevs
653 parentrevs = self.parentrevs
651 for r in xrange(startrev + 1, len(self)):
654 for r in xrange(startrev + 1, len(self)):
652 for p in parentrevs(r):
655 for p in parentrevs(r):
653 if p in reachable:
656 if p in reachable:
654 if r not in stoprevs:
657 if r not in stoprevs:
655 reachable.add(r)
658 reachable.add(r)
656 heads.add(r)
659 heads.add(r)
657 if p in heads and p not in stoprevs:
660 if p in heads and p not in stoprevs:
658 heads.remove(p)
661 heads.remove(p)
659
662
660 return [self.node(r) for r in heads]
663 return [self.node(r) for r in heads]
661
664
662 def children(self, node):
665 def children(self, node):
663 """find the children of a given node"""
666 """find the children of a given node"""
664 c = []
667 c = []
665 p = self.rev(node)
668 p = self.rev(node)
666 for r in range(p + 1, len(self)):
669 for r in range(p + 1, len(self)):
667 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
670 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
668 if prevs:
671 if prevs:
669 for pr in prevs:
672 for pr in prevs:
670 if pr == p:
673 if pr == p:
671 c.append(self.node(r))
674 c.append(self.node(r))
672 elif p == nullrev:
675 elif p == nullrev:
673 c.append(self.node(r))
676 c.append(self.node(r))
674 return c
677 return c
675
678
676 def descendant(self, start, end):
679 def descendant(self, start, end):
677 if start == nullrev:
680 if start == nullrev:
678 return True
681 return True
679 for i in self.descendants(start):
682 for i in self.descendants(start):
680 if i == end:
683 if i == end:
681 return True
684 return True
682 elif i > end:
685 elif i > end:
683 break
686 break
684 return False
687 return False
685
688
686 def ancestor(self, a, b):
689 def ancestor(self, a, b):
687 """calculate the least common ancestor of nodes a and b"""
690 """calculate the least common ancestor of nodes a and b"""
688
691
689 # fast path, check if it is a descendant
692 # fast path, check if it is a descendant
690 a, b = self.rev(a), self.rev(b)
693 a, b = self.rev(a), self.rev(b)
691 start, end = sorted((a, b))
694 start, end = sorted((a, b))
692 if self.descendant(start, end):
695 if self.descendant(start, end):
693 return self.node(start)
696 return self.node(start)
694
697
695 def parents(rev):
698 def parents(rev):
696 return [p for p in self.parentrevs(rev) if p != nullrev]
699 return [p for p in self.parentrevs(rev) if p != nullrev]
697
700
698 c = ancestor.ancestor(a, b, parents)
701 c = ancestor.ancestor(a, b, parents)
699 if c is None:
702 if c is None:
700 return nullid
703 return nullid
701
704
702 return self.node(c)
705 return self.node(c)
703
706
704 def _match(self, id):
707 def _match(self, id):
705 if isinstance(id, (long, int)):
708 if isinstance(id, (long, int)):
706 # rev
709 # rev
707 return self.node(id)
710 return self.node(id)
708 if len(id) == 20:
711 if len(id) == 20:
709 # possibly a binary node
712 # possibly a binary node
710 # odds of a binary node being all hex in ASCII are 1 in 10**25
713 # odds of a binary node being all hex in ASCII are 1 in 10**25
711 try:
714 try:
712 node = id
715 node = id
713 self.rev(node) # quick search the index
716 self.rev(node) # quick search the index
714 return node
717 return node
715 except LookupError:
718 except LookupError:
716 pass # may be partial hex id
719 pass # may be partial hex id
717 try:
720 try:
718 # str(rev)
721 # str(rev)
719 rev = int(id)
722 rev = int(id)
720 if str(rev) != id:
723 if str(rev) != id:
721 raise ValueError
724 raise ValueError
722 if rev < 0:
725 if rev < 0:
723 rev = len(self) + rev
726 rev = len(self) + rev
724 if rev < 0 or rev >= len(self):
727 if rev < 0 or rev >= len(self):
725 raise ValueError
728 raise ValueError
726 return self.node(rev)
729 return self.node(rev)
727 except (ValueError, OverflowError):
730 except (ValueError, OverflowError):
728 pass
731 pass
729 if len(id) == 40:
732 if len(id) == 40:
730 try:
733 try:
731 # a full hex nodeid?
734 # a full hex nodeid?
732 node = bin(id)
735 node = bin(id)
733 self.rev(node)
736 self.rev(node)
734 return node
737 return node
735 except (TypeError, LookupError):
738 except (TypeError, LookupError):
736 pass
739 pass
737
740
738 def _partialmatch(self, id):
741 def _partialmatch(self, id):
739 if id in self._pcache:
742 if id in self._pcache:
740 return self._pcache[id]
743 return self._pcache[id]
741
744
742 if len(id) < 40:
745 if len(id) < 40:
743 try:
746 try:
744 # hex(node)[:...]
747 # hex(node)[:...]
745 l = len(id) // 2 # grab an even number of digits
748 l = len(id) // 2 # grab an even number of digits
746 prefix = bin(id[:l * 2])
749 prefix = bin(id[:l * 2])
747 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
750 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
748 nl = [n for n in nl if hex(n).startswith(id)]
751 nl = [n for n in nl if hex(n).startswith(id)]
749 if len(nl) > 0:
752 if len(nl) > 0:
750 if len(nl) == 1:
753 if len(nl) == 1:
751 self._pcache[id] = nl[0]
754 self._pcache[id] = nl[0]
752 return nl[0]
755 return nl[0]
753 raise LookupError(id, self.indexfile,
756 raise LookupError(id, self.indexfile,
754 _('ambiguous identifier'))
757 _('ambiguous identifier'))
755 return None
758 return None
756 except TypeError:
759 except TypeError:
757 pass
760 pass
758
761
759 def lookup(self, id):
762 def lookup(self, id):
760 """locate a node based on:
763 """locate a node based on:
761 - revision number or str(revision number)
764 - revision number or str(revision number)
762 - nodeid or subset of hex nodeid
765 - nodeid or subset of hex nodeid
763 """
766 """
764 n = self._match(id)
767 n = self._match(id)
765 if n is not None:
768 if n is not None:
766 return n
769 return n
767 n = self._partialmatch(id)
770 n = self._partialmatch(id)
768 if n:
771 if n:
769 return n
772 return n
770
773
771 raise LookupError(id, self.indexfile, _('no match found'))
774 raise LookupError(id, self.indexfile, _('no match found'))
772
775
773 def cmp(self, node, text):
776 def cmp(self, node, text):
774 """compare text with a given file revision
777 """compare text with a given file revision
775
778
776 returns True if text is different than what is stored.
779 returns True if text is different than what is stored.
777 """
780 """
778 p1, p2 = self.parents(node)
781 p1, p2 = self.parents(node)
779 return hash(text, p1, p2) != node
782 return hash(text, p1, p2) != node

    def _addchunk(self, offset, data):
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _loadchunk(self, offset, length):
        if self._inline:
            df = self.opener(self.indexfile)
        else:
            df = self.opener(self.datafile)

        readahead = max(65536, length)
        df.seek(offset)
        d = df.read(readahead)
        self._addchunk(offset, d)
        if readahead > length:
            return d[:length]
        return d

    def _getchunk(self, offset, length):
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return d[cachestart:cacheend]

        return self._loadchunk(offset, length)

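The chunk cache above is a single contiguous window (offset, data): a read is served from it only when the requested span lies entirely inside the window, and a miss refills the window with at least 64KB of readahead. A sketch of that policy in isolation, over any file-like object (hypothetical function, not the revlog API):

    def read_window(f, cache, offset, length, readahead=65536):
        o, d = cache
        start = offset - o
        end = start + length
        if start >= 0 and end <= len(d):
            # hit: slice out of the current window
            return d[start:end], cache
        # miss: refill the window starting at the requested offset
        f.seek(offset)
        d = f.read(max(readahead, length))
        return d[:length], (offset, d)
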
    def _chunkraw(self, startrev, endrev):
        start = self.start(startrev)
        length = self.end(endrev) - start
        if self._inline:
            start += (startrev + 1) * self._io.size
        return self._getchunk(start, length)

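For an inline revlog, index entries and data chunks interleave in the .i file, so the flat data offset must be shifted past one fixed-size index entry per revision up to and including startrev; hence the (startrev + 1) term above. As arithmetic (64 bytes is assumed here as the RevlogNG entry size):

    INDEX_ENTRY_SIZE = 64  # assumed RevlogNG entry size

    def inline_offset(data_start, startrev, entry_size=INDEX_ENTRY_SIZE):
        # entries 0..startrev precede this revision's data chunk
        return data_start + (startrev + 1) * entry_size
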
    def _chunk(self, rev):
        return decompress(self._chunkraw(rev, rev))

    def _chunkbase(self, rev):
        return self._chunk(rev)

    def _chunkclear(self):
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

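Index field 3 does double duty: in a generaldelta revlog it names the delta parent directly, while in a classic revlog it only records the chain's base snapshot, so the delta parent is implicitly rev - 1. A standalone restatement of the rule, over a toy list of base fields:

    def delta_parent(bases, rev, generaldelta, nullrev=-1):
        base = bases[rev]          # field 3 of the index entry
        if base == rev:
            return nullrev         # a full snapshot has no delta parent
        return base if generaldelta else rev - 1

    # e.g. bases = [0, 0, 2]: rev 1 deltas against rev 0, and rev 2
    # starts a fresh snapshot, under either scheme
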
    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return self._chunk(rev2)

        return mdiff.textdiff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        cachedrev = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                return self._cache[2]
            cachedrev = self._cache[1]

        # look up what we need to read
        text = None
        rev = self.rev(node)

        # check rev flags
        if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

        # build delta chain
        chain = []
        index = self.index # for performance
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != cachedrev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]
        chain.reverse()
        base = iterrev

        if iterrev == cachedrev:
            # cache hit
            text = self._cache[2]

        # drop cache to save memory
        self._cache = None

        self._chunkraw(base, rev)
        if text is None:
            text = self._chunkbase(base)

        bins = [self._chunk(r) for r in chain]
        text = mdiff.patches(text, bins)

        text = self._checkhash(text, node, rev)

        self._cache = (node, rev, text)
        return text

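The chain walk above is the heart of generaldelta reading: follow the base field (or just step back one revision in the classic layout) until a snapshot is reached, then replay the deltas oldest-first. A toy model of the generaldelta case, with deltas reduced to plain functions on text so the control flow shows without mdiff (the cached-revision short-circuit is omitted):

    def reconstruct(bases, payloads, rev):
        """bases[r]: delta parent, or r itself at a snapshot;
        payloads[r]: full text at a snapshot, else a callable
        applying rev r's delta. Illustrative only."""
        chain = []
        iterrev = rev
        while bases[iterrev] != iterrev:
            chain.append(iterrev)
            iterrev = bases[iterrev]
        chain.reverse()
        text = payloads[iterrev]          # start from the snapshot
        for r in chain:
            text = payloads[r](text)      # apply deltas oldest-first
        return text
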
    def _checkhash(self, text, node, rev):
        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.indexfile, rev))
        return text

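The hash() being checked is, in revlogs of this vintage, SHA-1 over the two parent nodes in sorted order followed by the revision text, so corruption of either content or ancestry fails the check. A standalone sketch:

    import hashlib

    def node_hash(text, p1, p2):
        # sort the parents so the hash is order-independent
        a, b = sorted((p1, p2))
        s = hashlib.sha1(a)
        s.update(b)
        s.update(text)
        return s.digest()
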
    def checkinlinesize(self, tr, fp=None):
        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        df = self.opener(self.datafile, 'w')
        try:
            for r in self:
                df.write(self._chunkraw(r, r))
        finally:
            df.close()

        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

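The split is triggered once the inline data passes _maxinline (128KiB at the time; treat the exact constant as an assumption here). The check addresses index[-2] because the in-memory index carries a trailing null sentinel entry, so -2 is the last real revision. The trigger alone:

    MAXINLINE = 131072  # assumed value of _maxinline (128KiB)

    def needs_split(inline, last_start, last_length):
        # last_start/last_length describe the final real revision
        # (index[-2], since index[-1] is the null sentinel)
        return inline and last_start + last_length >= MAXINLINE
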
    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        """
        node = hash(text, p1, p2)
        if node in self.nodemap:
            return node

        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a+")
        try:
            return self._addrevision(node, text, transaction, link, p1, p2,
                                     cachedelta, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def _addrevision(self, node, text, transaction, link, p1, p2,
                     cachedelta, ifh, dfh):

        btext = [text]
        def buildtext():
            if btext[0] is not None:
                return btext[0]
            # flush any pending writes here so we can read it in revision
            if dfh:
                dfh.flush()
            ifh.flush()
            basetext = self.revision(self.node(cachedelta[0]))
            btext[0] = mdiff.patch(basetext, cachedelta[1])
            chk = hash(btext[0], p1, p2)
            if chk != node:
                raise RevlogError(_("consistency error in delta"))
            return btext[0]

        def builddelta(rev):
            # can we use the cached delta?
            if cachedelta and cachedelta[0] == rev:
                delta = cachedelta[1]
            else:
                t = buildtext()
                ptext = self.revision(self.node(rev))
                delta = mdiff.textdiff(ptext, t)
            data = compress(delta)
            l = len(data[1]) + len(data[0])
            basecache = self._basecache
            if basecache and basecache[0] == rev:
-               base = basecache[1]
+               chainbase = basecache[1]
            else:
-               base = self.chainbase(rev)
+               chainbase = self.chainbase(rev)
-           dist = l + offset - self.start(base)
+           dist = l + offset - self.start(chainbase)
+           if self._generaldelta:
+               base = rev
+           else:
+               base = chainbase
            return dist, l, data, base

        curr = len(self)
        prev = curr - 1
        base = curr
        offset = self.end(prev)
        flags = 0
        d = None
        p1r, p2r = self.rev(p1), self.rev(p2)

        # should we try to build a delta?
        if prev != nullrev:
-           d = builddelta(prev)
+           if self._generaldelta:
+               d = builddelta(p1r)
+           else:
+               d = builddelta(prev)
            dist, l, data, base = d

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if text is None:
            textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
                                        cachedelta[1])
        else:
            textlen = len(text)
        if d is None or dist > textlen * 2:
            text = buildtext()
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = curr

        e = (offset_type(offset, flags), l, textlen,
             base, link, p1r, p2r, node)
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        if type(text) == str: # only accept immutable objects
            self._cache = (node, curr, text)
        self._basecache = (curr, base)
        return node

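This commit's core policy change lives in the hunks above: with generaldelta the candidate delta is built against p1 and the stored base names that delta parent, while the snapshot test still measures distance along the chain from its base snapshot, bounding what a reader must fetch to roughly twice the plain text. A hedged restatement of the decision (illustrative names, not the real signature):

    def plan_storage(generaldelta, p1rev, prevrev, deltalen, offset,
                     chainbase_start, textlen, nullrev=-1):
        if prevrev == nullrev:
            return 'snapshot', None    # first revision: store full text
        # generaldelta deltas against p1; classic revlogs against prev
        deltarev = p1rev if generaldelta else prevrev
        # bytes a reader must fetch: from the chain's base snapshot
        # through the end of the new delta
        dist = deltalen + offset - chainbase_start
        if dist > textlen * 2:
            return 'snapshot', None    # chain too costly, reset it
        return 'delta', deltarev
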
    def group(self, nodelist, bundler):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0]; the receiver is
        guaranteed to have this parent, since it has all history before
        these changesets. If the first parent is nullrev the
        changegroup starts with a full revision.
        """

        revs = sorted([self.rev(n) for n in nodelist])

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield bundler.close()
            return

        # add the parent of the first rev
        p = self.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        for r in xrange(len(revs) - 1):
            prev, curr = revs[r], revs[r + 1]
            for c in bundler.revchunk(self, curr, prev):
                yield c

        yield bundler.close()

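group() always orders the outgoing revisions ascending and encodes each against its predecessor in that list, seeding the list with the first revision's parent so the receiver is guaranteed a usable base. The ordering in isolation:

    def delta_pairs(revs, first_parent):
        # revs: revision numbers to transmit
        order = [first_parent] + sorted(revs)
        for base, curr in zip(order, order[1:]):
            yield base, curr    # encode curr as a delta against base

    # e.g. list(delta_pairs([3, 5], 2)) == [(2, 3), (3, 5)]
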
    def addgroup(self, bundle, linkmapper, transaction):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log; the rest are against the previous delta.
        """

        # track the base of the current delta log
        node = None

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        try:
            # loop through our set of deltas
            chain = None
            while 1:
                chunkdata = bundle.deltachunk(chain)
                if not chunkdata:
                    break
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']

                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)
                chain = self._addrevision(node, None, transaction, link,
                                          p1, p2, (baserev, delta), ifh, dfh)
                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return node

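Each incoming chunk carries an explicit deltabase, which is what keeps the receive side indifferent to how the sender chained its deltas (previous chunk, p1, or any node already known); both parents and the base must resolve locally before the revision is added. The validation step, sketched over plain dicts:

    def check_chunk(nodemap, chunk):
        # chunk: dict with 'p1', 'p2' and 'deltabase' node ids
        for p in (chunk['p1'], chunk['p2']):
            if p not in nodemap:
                raise LookupError('unknown parent')
        if chunk['deltabase'] not in nodemap:
            raise LookupError('unknown delta base')
        return nodemap[chunk['deltabase']]  # local rev to delta against
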
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        for rev in self:
            if self.index[rev][4] >= minlink:
                break
        else:
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

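The truncation points follow the two layouts: a split revlog is cut in the .d file at the first stripped revision's data offset and in the .i file at rev * entrysize, while an inline revlog folds both into one offset in the single file. As arithmetic (hypothetical helper):

    def truncate_offsets(inline, data_start, rev, entry_size):
        # returns (datafile_cut, indexfile_cut); datafile_cut is None
        # when everything lives in the one inline file
        if inline:
            return None, data_start + rev * entry_size
        return data_start, rev * entry_size
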
    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res