revlog: get rid of defversion...
Sune Foldager
r14333:31a5973f default
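What the patch does, in brief: _applyrequirements used to special-case 'generaldelta', and the changelog property later pushed the changelog's revlog version into the store opener's options as 'defversion'. After the change, the opener options are derived directly from the repository requirements, and the 'defversion' entry is gone. A minimal standalone sketch of the new derivation logic (the helper name is made up for illustration; the real method assigns the dict to self.sopener.options):

    # Sketch only: mirrors the dict((r, 1) ...) expression from the patch.
    def opener_options(requirements):
        # only requirements the revlog-opening code understands become options
        openerreqs = set(('revlogv1', 'generaldelta'))
        return dict((r, 1) for r in requirements if r in openerreqs)

    print(opener_options(set(['revlogv1', 'store', 'fncache', 'generaldelta'])))
    # prints {'revlogv1': 1, 'generaldelta': 1} (key order may vary)

Deriving the options from the requirements set means a future revlog-related requirement only needs to be added to openerreqs, rather than growing its own if-branch or a separate opener option set elsewhere.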
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,1976 +1,1975 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error
13 import scmutil, util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20
20
21 class localrepository(repo.repository):
21 class localrepository(repo.repository):
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
23 'known', 'getbundle'))
23 'known', 'getbundle'))
24 supportedformats = set(('revlogv1', 'generaldelta'))
24 supportedformats = set(('revlogv1', 'generaldelta'))
25 supported = supportedformats | set(('store', 'fncache', 'shared',
25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 'dotencode'))
26 'dotencode'))
27
27
28 def __init__(self, baseui, path=None, create=0):
28 def __init__(self, baseui, path=None, create=0):
29 repo.repository.__init__(self)
29 repo.repository.__init__(self)
30 self.root = os.path.realpath(util.expandpath(path))
30 self.root = os.path.realpath(util.expandpath(path))
31 self.path = os.path.join(self.root, ".hg")
31 self.path = os.path.join(self.root, ".hg")
32 self.origroot = path
32 self.origroot = path
33 self.auditor = scmutil.pathauditor(self.root, self._checknested)
33 self.auditor = scmutil.pathauditor(self.root, self._checknested)
34 self.opener = scmutil.opener(self.path)
34 self.opener = scmutil.opener(self.path)
35 self.wopener = scmutil.opener(self.root)
35 self.wopener = scmutil.opener(self.root)
36 self.baseui = baseui
36 self.baseui = baseui
37 self.ui = baseui.copy()
37 self.ui = baseui.copy()
38
38
39 try:
39 try:
40 self.ui.readconfig(self.join("hgrc"), self.root)
40 self.ui.readconfig(self.join("hgrc"), self.root)
41 extensions.loadall(self.ui)
41 extensions.loadall(self.ui)
42 except IOError:
42 except IOError:
43 pass
43 pass
44
44
45 if not os.path.isdir(self.path):
45 if not os.path.isdir(self.path):
46 if create:
46 if create:
47 if not os.path.exists(path):
47 if not os.path.exists(path):
48 util.makedirs(path)
48 util.makedirs(path)
49 util.makedir(self.path, notindexed=True)
49 util.makedir(self.path, notindexed=True)
50 requirements = ["revlogv1"]
50 requirements = ["revlogv1"]
51 if self.ui.configbool('format', 'usestore', True):
51 if self.ui.configbool('format', 'usestore', True):
52 os.mkdir(os.path.join(self.path, "store"))
52 os.mkdir(os.path.join(self.path, "store"))
53 requirements.append("store")
53 requirements.append("store")
54 if self.ui.configbool('format', 'usefncache', True):
54 if self.ui.configbool('format', 'usefncache', True):
55 requirements.append("fncache")
55 requirements.append("fncache")
56 if self.ui.configbool('format', 'dotencode', True):
56 if self.ui.configbool('format', 'dotencode', True):
57 requirements.append('dotencode')
57 requirements.append('dotencode')
58 # create an invalid changelog
58 # create an invalid changelog
59 self.opener.append(
59 self.opener.append(
60 "00changelog.i",
60 "00changelog.i",
61 '\0\0\0\2' # represents revlogv2
61 '\0\0\0\2' # represents revlogv2
62 ' dummy changelog to prevent using the old repo layout'
62 ' dummy changelog to prevent using the old repo layout'
63 )
63 )
64 if self.ui.configbool('format', 'generaldelta', False):
64 if self.ui.configbool('format', 'generaldelta', False):
65 requirements.append("generaldelta")
65 requirements.append("generaldelta")
66 else:
66 else:
67 raise error.RepoError(_("repository %s not found") % path)
67 raise error.RepoError(_("repository %s not found") % path)
68 elif create:
68 elif create:
69 raise error.RepoError(_("repository %s already exists") % path)
69 raise error.RepoError(_("repository %s already exists") % path)
70 else:
70 else:
71 # find requirements
71 # find requirements
72 requirements = set()
72 requirements = set()
73 try:
73 try:
74 requirements = set(self.opener.read("requires").splitlines())
74 requirements = set(self.opener.read("requires").splitlines())
75 except IOError, inst:
75 except IOError, inst:
76 if inst.errno != errno.ENOENT:
76 if inst.errno != errno.ENOENT:
77 raise
77 raise
78 for r in requirements - self.supported:
78 for r in requirements - self.supported:
79 raise error.RequirementError(
79 raise error.RequirementError(
80 _("requirement '%s' not supported") % r)
80 _("requirement '%s' not supported") % r)
81
81
82 self.sharedpath = self.path
82 self.sharedpath = self.path
83 try:
83 try:
84 s = os.path.realpath(self.opener.read("sharedpath"))
84 s = os.path.realpath(self.opener.read("sharedpath"))
85 if not os.path.exists(s):
85 if not os.path.exists(s):
86 raise error.RepoError(
86 raise error.RepoError(
87 _('.hg/sharedpath points to nonexistent directory %s') % s)
87 _('.hg/sharedpath points to nonexistent directory %s') % s)
88 self.sharedpath = s
88 self.sharedpath = s
89 except IOError, inst:
89 except IOError, inst:
90 if inst.errno != errno.ENOENT:
90 if inst.errno != errno.ENOENT:
91 raise
91 raise
92
92
93 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
93 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
94 self.spath = self.store.path
94 self.spath = self.store.path
95 self.sopener = self.store.opener
95 self.sopener = self.store.opener
96 self.sjoin = self.store.join
96 self.sjoin = self.store.join
97 self.opener.createmode = self.store.createmode
97 self.opener.createmode = self.store.createmode
98 self._applyrequirements(requirements)
98 self._applyrequirements(requirements)
99 if create:
99 if create:
100 self._writerequirements()
100 self._writerequirements()
101
101
102 # These two define the set of tags for this repository. _tags
102 # These two define the set of tags for this repository. _tags
103 # maps tag name to node; _tagtypes maps tag name to 'global' or
103 # maps tag name to node; _tagtypes maps tag name to 'global' or
104 # 'local'. (Global tags are defined by .hgtags across all
104 # 'local'. (Global tags are defined by .hgtags across all
105 # heads, and local tags are defined in .hg/localtags.) They
105 # heads, and local tags are defined in .hg/localtags.) They
106 # constitute the in-memory cache of tags.
106 # constitute the in-memory cache of tags.
107 self._tags = None
107 self._tags = None
108 self._tagtypes = None
108 self._tagtypes = None
109
109
110 self._branchcache = None
110 self._branchcache = None
111 self._branchcachetip = None
111 self._branchcachetip = None
112 self.nodetagscache = None
112 self.nodetagscache = None
113 self.filterpats = {}
113 self.filterpats = {}
114 self._datafilters = {}
114 self._datafilters = {}
115 self._transref = self._lockref = self._wlockref = None
115 self._transref = self._lockref = self._wlockref = None
116
116
117 def _applyrequirements(self, requirements):
117 def _applyrequirements(self, requirements):
118 self.requirements = requirements
118 self.requirements = requirements
119 self.sopener.options = {}
119 openerreqs = set(('revlogv1', 'generaldelta'))
120 if 'generaldelta' in requirements:
120 self.sopener.options = dict((r, 1) for r in requirements
121 self.sopener.options['generaldelta'] = 1
121 if r in openerreqs)
122
122
123 def _writerequirements(self):
123 def _writerequirements(self):
124 reqfile = self.opener("requires", "w")
124 reqfile = self.opener("requires", "w")
125 for r in self.requirements:
125 for r in self.requirements:
126 reqfile.write("%s\n" % r)
126 reqfile.write("%s\n" % r)
127 reqfile.close()
127 reqfile.close()
128
128
129 def _checknested(self, path):
129 def _checknested(self, path):
130 """Determine if path is a legal nested repository."""
130 """Determine if path is a legal nested repository."""
131 if not path.startswith(self.root):
131 if not path.startswith(self.root):
132 return False
132 return False
133 subpath = path[len(self.root) + 1:]
133 subpath = path[len(self.root) + 1:]
134
134
135 # XXX: Checking against the current working copy is wrong in
135 # XXX: Checking against the current working copy is wrong in
136 # the sense that it can reject things like
136 # the sense that it can reject things like
137 #
137 #
138 # $ hg cat -r 10 sub/x.txt
138 # $ hg cat -r 10 sub/x.txt
139 #
139 #
140 # if sub/ is no longer a subrepository in the working copy
140 # if sub/ is no longer a subrepository in the working copy
141 # parent revision.
141 # parent revision.
142 #
142 #
143 # However, it can of course also allow things that would have
143 # However, it can of course also allow things that would have
144 # been rejected before, such as the above cat command if sub/
144 # been rejected before, such as the above cat command if sub/
145 # is a subrepository now, but was a normal directory before.
145 # is a subrepository now, but was a normal directory before.
146 # The old path auditor would have rejected by mistake since it
146 # The old path auditor would have rejected by mistake since it
147 # panics when it sees sub/.hg/.
147 # panics when it sees sub/.hg/.
148 #
148 #
149 # All in all, checking against the working copy seems sensible
149 # All in all, checking against the working copy seems sensible
150 # since we want to prevent access to nested repositories on
150 # since we want to prevent access to nested repositories on
151 # the filesystem *now*.
151 # the filesystem *now*.
152 ctx = self[None]
152 ctx = self[None]
153 parts = util.splitpath(subpath)
153 parts = util.splitpath(subpath)
154 while parts:
154 while parts:
155 prefix = os.sep.join(parts)
155 prefix = os.sep.join(parts)
156 if prefix in ctx.substate:
156 if prefix in ctx.substate:
157 if prefix == subpath:
157 if prefix == subpath:
158 return True
158 return True
159 else:
159 else:
160 sub = ctx.sub(prefix)
160 sub = ctx.sub(prefix)
161 return sub.checknested(subpath[len(prefix) + 1:])
161 return sub.checknested(subpath[len(prefix) + 1:])
162 else:
162 else:
163 parts.pop()
163 parts.pop()
164 return False
164 return False
165
165
166 @util.propertycache
166 @util.propertycache
167 def _bookmarks(self):
167 def _bookmarks(self):
168 return bookmarks.read(self)
168 return bookmarks.read(self)
169
169
170 @util.propertycache
170 @util.propertycache
171 def _bookmarkcurrent(self):
171 def _bookmarkcurrent(self):
172 return bookmarks.readcurrent(self)
172 return bookmarks.readcurrent(self)
173
173
174 @propertycache
174 @propertycache
175 def changelog(self):
175 def changelog(self):
176 c = changelog.changelog(self.sopener)
176 c = changelog.changelog(self.sopener)
177 if 'HG_PENDING' in os.environ:
177 if 'HG_PENDING' in os.environ:
178 p = os.environ['HG_PENDING']
178 p = os.environ['HG_PENDING']
179 if p.startswith(self.root):
179 if p.startswith(self.root):
180 c.readpending('00changelog.i.a')
180 c.readpending('00changelog.i.a')
181 self.sopener.options['defversion'] = c.version
182 return c
181 return c
183
182
184 @propertycache
183 @propertycache
185 def manifest(self):
184 def manifest(self):
186 return manifest.manifest(self.sopener)
185 return manifest.manifest(self.sopener)
187
186
188 @propertycache
187 @propertycache
189 def dirstate(self):
188 def dirstate(self):
190 warned = [0]
189 warned = [0]
191 def validate(node):
190 def validate(node):
192 try:
191 try:
193 self.changelog.rev(node)
192 self.changelog.rev(node)
194 return node
193 return node
195 except error.LookupError:
194 except error.LookupError:
196 if not warned[0]:
195 if not warned[0]:
197 warned[0] = True
196 warned[0] = True
198 self.ui.warn(_("warning: ignoring unknown"
197 self.ui.warn(_("warning: ignoring unknown"
199 " working parent %s!\n") % short(node))
198 " working parent %s!\n") % short(node))
200 return nullid
199 return nullid
201
200
202 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
201 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
203
202
204 def __getitem__(self, changeid):
203 def __getitem__(self, changeid):
205 if changeid is None:
204 if changeid is None:
206 return context.workingctx(self)
205 return context.workingctx(self)
207 return context.changectx(self, changeid)
206 return context.changectx(self, changeid)
208
207
209 def __contains__(self, changeid):
208 def __contains__(self, changeid):
210 try:
209 try:
211 return bool(self.lookup(changeid))
210 return bool(self.lookup(changeid))
212 except error.RepoLookupError:
211 except error.RepoLookupError:
213 return False
212 return False
214
213
215 def __nonzero__(self):
214 def __nonzero__(self):
216 return True
215 return True
217
216
218 def __len__(self):
217 def __len__(self):
219 return len(self.changelog)
218 return len(self.changelog)
220
219
221 def __iter__(self):
220 def __iter__(self):
222 for i in xrange(len(self)):
221 for i in xrange(len(self)):
223 yield i
222 yield i
224
223
225 def url(self):
224 def url(self):
226 return 'file:' + self.root
225 return 'file:' + self.root
227
226
228 def hook(self, name, throw=False, **args):
227 def hook(self, name, throw=False, **args):
229 return hook.hook(self.ui, self, name, throw, **args)
228 return hook.hook(self.ui, self, name, throw, **args)
230
229
231 tag_disallowed = ':\r\n'
230 tag_disallowed = ':\r\n'
232
231
233 def _tag(self, names, node, message, local, user, date, extra={}):
232 def _tag(self, names, node, message, local, user, date, extra={}):
234 if isinstance(names, str):
233 if isinstance(names, str):
235 allchars = names
234 allchars = names
236 names = (names,)
235 names = (names,)
237 else:
236 else:
238 allchars = ''.join(names)
237 allchars = ''.join(names)
239 for c in self.tag_disallowed:
238 for c in self.tag_disallowed:
240 if c in allchars:
239 if c in allchars:
241 raise util.Abort(_('%r cannot be used in a tag name') % c)
240 raise util.Abort(_('%r cannot be used in a tag name') % c)
242
241
243 branches = self.branchmap()
242 branches = self.branchmap()
244 for name in names:
243 for name in names:
245 self.hook('pretag', throw=True, node=hex(node), tag=name,
244 self.hook('pretag', throw=True, node=hex(node), tag=name,
246 local=local)
245 local=local)
247 if name in branches:
246 if name in branches:
248 self.ui.warn(_("warning: tag %s conflicts with existing"
247 self.ui.warn(_("warning: tag %s conflicts with existing"
249 " branch name\n") % name)
248 " branch name\n") % name)
250
249
251 def writetags(fp, names, munge, prevtags):
250 def writetags(fp, names, munge, prevtags):
252 fp.seek(0, 2)
251 fp.seek(0, 2)
253 if prevtags and prevtags[-1] != '\n':
252 if prevtags and prevtags[-1] != '\n':
254 fp.write('\n')
253 fp.write('\n')
255 for name in names:
254 for name in names:
256 m = munge and munge(name) or name
255 m = munge and munge(name) or name
257 if self._tagtypes and name in self._tagtypes:
256 if self._tagtypes and name in self._tagtypes:
258 old = self._tags.get(name, nullid)
257 old = self._tags.get(name, nullid)
259 fp.write('%s %s\n' % (hex(old), m))
258 fp.write('%s %s\n' % (hex(old), m))
260 fp.write('%s %s\n' % (hex(node), m))
259 fp.write('%s %s\n' % (hex(node), m))
261 fp.close()
260 fp.close()
262
261
263 prevtags = ''
262 prevtags = ''
264 if local:
263 if local:
265 try:
264 try:
266 fp = self.opener('localtags', 'r+')
265 fp = self.opener('localtags', 'r+')
267 except IOError:
266 except IOError:
268 fp = self.opener('localtags', 'a')
267 fp = self.opener('localtags', 'a')
269 else:
268 else:
270 prevtags = fp.read()
269 prevtags = fp.read()
271
270
272 # local tags are stored in the current charset
271 # local tags are stored in the current charset
273 writetags(fp, names, None, prevtags)
272 writetags(fp, names, None, prevtags)
274 for name in names:
273 for name in names:
275 self.hook('tag', node=hex(node), tag=name, local=local)
274 self.hook('tag', node=hex(node), tag=name, local=local)
276 return
275 return
277
276
278 try:
277 try:
279 fp = self.wfile('.hgtags', 'rb+')
278 fp = self.wfile('.hgtags', 'rb+')
280 except IOError:
279 except IOError:
281 fp = self.wfile('.hgtags', 'ab')
280 fp = self.wfile('.hgtags', 'ab')
282 else:
281 else:
283 prevtags = fp.read()
282 prevtags = fp.read()
284
283
285 # committed tags are stored in UTF-8
284 # committed tags are stored in UTF-8
286 writetags(fp, names, encoding.fromlocal, prevtags)
285 writetags(fp, names, encoding.fromlocal, prevtags)
287
286
288 fp.close()
287 fp.close()
289
288
290 if '.hgtags' not in self.dirstate:
289 if '.hgtags' not in self.dirstate:
291 self[None].add(['.hgtags'])
290 self[None].add(['.hgtags'])
292
291
293 m = matchmod.exact(self.root, '', ['.hgtags'])
292 m = matchmod.exact(self.root, '', ['.hgtags'])
294 tagnode = self.commit(message, user, date, extra=extra, match=m)
293 tagnode = self.commit(message, user, date, extra=extra, match=m)
295
294
296 for name in names:
295 for name in names:
297 self.hook('tag', node=hex(node), tag=name, local=local)
296 self.hook('tag', node=hex(node), tag=name, local=local)
298
297
299 return tagnode
298 return tagnode
300
299
301 def tag(self, names, node, message, local, user, date):
300 def tag(self, names, node, message, local, user, date):
302 '''tag a revision with one or more symbolic names.
301 '''tag a revision with one or more symbolic names.
303
302
304 names is a list of strings or, when adding a single tag, names may be a
303 names is a list of strings or, when adding a single tag, names may be a
305 string.
304 string.
306
305
307 if local is True, the tags are stored in a per-repository file.
306 if local is True, the tags are stored in a per-repository file.
308 otherwise, they are stored in the .hgtags file, and a new
307 otherwise, they are stored in the .hgtags file, and a new
309 changeset is committed with the change.
308 changeset is committed with the change.
310
309
311 keyword arguments:
310 keyword arguments:
312
311
313 local: whether to store tags in non-version-controlled file
312 local: whether to store tags in non-version-controlled file
314 (default False)
313 (default False)
315
314
316 message: commit message to use if committing
315 message: commit message to use if committing
317
316
318 user: name of user to use if committing
317 user: name of user to use if committing
319
318
320 date: date tuple to use if committing'''
319 date: date tuple to use if committing'''
321
320
322 if not local:
321 if not local:
323 for x in self.status()[:5]:
322 for x in self.status()[:5]:
324 if '.hgtags' in x:
323 if '.hgtags' in x:
325 raise util.Abort(_('working copy of .hgtags is changed '
324 raise util.Abort(_('working copy of .hgtags is changed '
326 '(please commit .hgtags manually)'))
325 '(please commit .hgtags manually)'))
327
326
328 self.tags() # instantiate the cache
327 self.tags() # instantiate the cache
329 self._tag(names, node, message, local, user, date)
328 self._tag(names, node, message, local, user, date)
330
329
331 def tags(self):
330 def tags(self):
332 '''return a mapping of tag to node'''
331 '''return a mapping of tag to node'''
333 if self._tags is None:
332 if self._tags is None:
334 (self._tags, self._tagtypes) = self._findtags()
333 (self._tags, self._tagtypes) = self._findtags()
335
334
336 return self._tags
335 return self._tags
337
336
338 def _findtags(self):
337 def _findtags(self):
339 '''Do the hard work of finding tags. Return a pair of dicts
338 '''Do the hard work of finding tags. Return a pair of dicts
340 (tags, tagtypes) where tags maps tag name to node, and tagtypes
339 (tags, tagtypes) where tags maps tag name to node, and tagtypes
341 maps tag name to a string like \'global\' or \'local\'.
340 maps tag name to a string like \'global\' or \'local\'.
342 Subclasses or extensions are free to add their own tags, but
341 Subclasses or extensions are free to add their own tags, but
343 should be aware that the returned dicts will be retained for the
342 should be aware that the returned dicts will be retained for the
344 duration of the localrepo object.'''
343 duration of the localrepo object.'''
345
344
346 # XXX what tagtype should subclasses/extensions use? Currently
345 # XXX what tagtype should subclasses/extensions use? Currently
347 # mq and bookmarks add tags, but do not set the tagtype at all.
346 # mq and bookmarks add tags, but do not set the tagtype at all.
348 # Should each extension invent its own tag type? Should there
347 # Should each extension invent its own tag type? Should there
349 # be one tagtype for all such "virtual" tags? Or is the status
348 # be one tagtype for all such "virtual" tags? Or is the status
350 # quo fine?
349 # quo fine?
351
350
352 alltags = {} # map tag name to (node, hist)
351 alltags = {} # map tag name to (node, hist)
353 tagtypes = {}
352 tagtypes = {}
354
353
355 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
354 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
356 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
355 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
357
356
358 # Build the return dicts. Have to re-encode tag names because
357 # Build the return dicts. Have to re-encode tag names because
359 # the tags module always uses UTF-8 (in order not to lose info
358 # the tags module always uses UTF-8 (in order not to lose info
360 # writing to the cache), but the rest of Mercurial wants them in
359 # writing to the cache), but the rest of Mercurial wants them in
361 # local encoding.
360 # local encoding.
362 tags = {}
361 tags = {}
363 for (name, (node, hist)) in alltags.iteritems():
362 for (name, (node, hist)) in alltags.iteritems():
364 if node != nullid:
363 if node != nullid:
365 try:
364 try:
366 # ignore tags to unknown nodes
365 # ignore tags to unknown nodes
367 self.changelog.lookup(node)
366 self.changelog.lookup(node)
368 tags[encoding.tolocal(name)] = node
367 tags[encoding.tolocal(name)] = node
369 except error.LookupError:
368 except error.LookupError:
370 pass
369 pass
371 tags['tip'] = self.changelog.tip()
370 tags['tip'] = self.changelog.tip()
372 tagtypes = dict([(encoding.tolocal(name), value)
371 tagtypes = dict([(encoding.tolocal(name), value)
373 for (name, value) in tagtypes.iteritems()])
372 for (name, value) in tagtypes.iteritems()])
374 return (tags, tagtypes)
373 return (tags, tagtypes)
375
374
376 def tagtype(self, tagname):
375 def tagtype(self, tagname):
377 '''
376 '''
378 return the type of the given tag. result can be:
377 return the type of the given tag. result can be:
379
378
380 'local' : a local tag
379 'local' : a local tag
381 'global' : a global tag
380 'global' : a global tag
382 None : tag does not exist
381 None : tag does not exist
383 '''
382 '''
384
383
385 self.tags()
384 self.tags()
386
385
387 return self._tagtypes.get(tagname)
386 return self._tagtypes.get(tagname)
388
387
389 def tagslist(self):
388 def tagslist(self):
390 '''return a list of tags ordered by revision'''
389 '''return a list of tags ordered by revision'''
391 l = []
390 l = []
392 for t, n in self.tags().iteritems():
391 for t, n in self.tags().iteritems():
393 r = self.changelog.rev(n)
392 r = self.changelog.rev(n)
394 l.append((r, t, n))
393 l.append((r, t, n))
395 return [(t, n) for r, t, n in sorted(l)]
394 return [(t, n) for r, t, n in sorted(l)]
396
395
397 def nodetags(self, node):
396 def nodetags(self, node):
398 '''return the tags associated with a node'''
397 '''return the tags associated with a node'''
399 if not self.nodetagscache:
398 if not self.nodetagscache:
400 self.nodetagscache = {}
399 self.nodetagscache = {}
401 for t, n in self.tags().iteritems():
400 for t, n in self.tags().iteritems():
402 self.nodetagscache.setdefault(n, []).append(t)
401 self.nodetagscache.setdefault(n, []).append(t)
403 for tags in self.nodetagscache.itervalues():
402 for tags in self.nodetagscache.itervalues():
404 tags.sort()
403 tags.sort()
405 return self.nodetagscache.get(node, [])
404 return self.nodetagscache.get(node, [])
406
405
407 def nodebookmarks(self, node):
406 def nodebookmarks(self, node):
408 marks = []
407 marks = []
409 for bookmark, n in self._bookmarks.iteritems():
408 for bookmark, n in self._bookmarks.iteritems():
410 if n == node:
409 if n == node:
411 marks.append(bookmark)
410 marks.append(bookmark)
412 return sorted(marks)
411 return sorted(marks)
413
412
414 def _branchtags(self, partial, lrev):
413 def _branchtags(self, partial, lrev):
415 # TODO: rename this function?
414 # TODO: rename this function?
416 tiprev = len(self) - 1
415 tiprev = len(self) - 1
417 if lrev != tiprev:
416 if lrev != tiprev:
418 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
417 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
419 self._updatebranchcache(partial, ctxgen)
418 self._updatebranchcache(partial, ctxgen)
420 self._writebranchcache(partial, self.changelog.tip(), tiprev)
419 self._writebranchcache(partial, self.changelog.tip(), tiprev)
421
420
422 return partial
421 return partial
423
422
424 def updatebranchcache(self):
423 def updatebranchcache(self):
425 tip = self.changelog.tip()
424 tip = self.changelog.tip()
426 if self._branchcache is not None and self._branchcachetip == tip:
425 if self._branchcache is not None and self._branchcachetip == tip:
427 return self._branchcache
426 return self._branchcache
428
427
429 oldtip = self._branchcachetip
428 oldtip = self._branchcachetip
430 self._branchcachetip = tip
429 self._branchcachetip = tip
431 if oldtip is None or oldtip not in self.changelog.nodemap:
430 if oldtip is None or oldtip not in self.changelog.nodemap:
432 partial, last, lrev = self._readbranchcache()
431 partial, last, lrev = self._readbranchcache()
433 else:
432 else:
434 lrev = self.changelog.rev(oldtip)
433 lrev = self.changelog.rev(oldtip)
435 partial = self._branchcache
434 partial = self._branchcache
436
435
437 self._branchtags(partial, lrev)
436 self._branchtags(partial, lrev)
438 # this private cache holds all heads (not just tips)
437 # this private cache holds all heads (not just tips)
439 self._branchcache = partial
438 self._branchcache = partial
440
439
441 def branchmap(self):
440 def branchmap(self):
442 '''returns a dictionary {branch: [branchheads]}'''
441 '''returns a dictionary {branch: [branchheads]}'''
443 self.updatebranchcache()
442 self.updatebranchcache()
444 return self._branchcache
443 return self._branchcache
445
444
446 def branchtags(self):
445 def branchtags(self):
447 '''return a dict where branch names map to the tipmost head of
446 '''return a dict where branch names map to the tipmost head of
448 the branch, open heads come before closed'''
447 the branch, open heads come before closed'''
449 bt = {}
448 bt = {}
450 for bn, heads in self.branchmap().iteritems():
449 for bn, heads in self.branchmap().iteritems():
451 tip = heads[-1]
450 tip = heads[-1]
452 for h in reversed(heads):
451 for h in reversed(heads):
453 if 'close' not in self.changelog.read(h)[5]:
452 if 'close' not in self.changelog.read(h)[5]:
454 tip = h
453 tip = h
455 break
454 break
456 bt[bn] = tip
455 bt[bn] = tip
457 return bt
456 return bt
458
457
459 def _readbranchcache(self):
458 def _readbranchcache(self):
460 partial = {}
459 partial = {}
461 try:
460 try:
462 f = self.opener("cache/branchheads")
461 f = self.opener("cache/branchheads")
463 lines = f.read().split('\n')
462 lines = f.read().split('\n')
464 f.close()
463 f.close()
465 except (IOError, OSError):
464 except (IOError, OSError):
466 return {}, nullid, nullrev
465 return {}, nullid, nullrev
467
466
468 try:
467 try:
469 last, lrev = lines.pop(0).split(" ", 1)
468 last, lrev = lines.pop(0).split(" ", 1)
470 last, lrev = bin(last), int(lrev)
469 last, lrev = bin(last), int(lrev)
471 if lrev >= len(self) or self[lrev].node() != last:
470 if lrev >= len(self) or self[lrev].node() != last:
472 # invalidate the cache
471 # invalidate the cache
473 raise ValueError('invalidating branch cache (tip differs)')
472 raise ValueError('invalidating branch cache (tip differs)')
474 for l in lines:
473 for l in lines:
475 if not l:
474 if not l:
476 continue
475 continue
477 node, label = l.split(" ", 1)
476 node, label = l.split(" ", 1)
478 label = encoding.tolocal(label.strip())
477 label = encoding.tolocal(label.strip())
479 partial.setdefault(label, []).append(bin(node))
478 partial.setdefault(label, []).append(bin(node))
480 except KeyboardInterrupt:
479 except KeyboardInterrupt:
481 raise
480 raise
482 except Exception, inst:
481 except Exception, inst:
483 if self.ui.debugflag:
482 if self.ui.debugflag:
484 self.ui.warn(str(inst), '\n')
483 self.ui.warn(str(inst), '\n')
485 partial, last, lrev = {}, nullid, nullrev
484 partial, last, lrev = {}, nullid, nullrev
486 return partial, last, lrev
485 return partial, last, lrev
487
486
488 def _writebranchcache(self, branches, tip, tiprev):
487 def _writebranchcache(self, branches, tip, tiprev):
489 try:
488 try:
490 f = self.opener("cache/branchheads", "w", atomictemp=True)
489 f = self.opener("cache/branchheads", "w", atomictemp=True)
491 f.write("%s %s\n" % (hex(tip), tiprev))
490 f.write("%s %s\n" % (hex(tip), tiprev))
492 for label, nodes in branches.iteritems():
491 for label, nodes in branches.iteritems():
493 for node in nodes:
492 for node in nodes:
494 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
493 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
495 f.rename()
494 f.rename()
496 except (IOError, OSError):
495 except (IOError, OSError):
497 pass
496 pass
498
497
499 def _updatebranchcache(self, partial, ctxgen):
498 def _updatebranchcache(self, partial, ctxgen):
500 # collect new branch entries
499 # collect new branch entries
501 newbranches = {}
500 newbranches = {}
502 for c in ctxgen:
501 for c in ctxgen:
503 newbranches.setdefault(c.branch(), []).append(c.node())
502 newbranches.setdefault(c.branch(), []).append(c.node())
504 # if older branchheads are reachable from new ones, they aren't
503 # if older branchheads are reachable from new ones, they aren't
505 # really branchheads. Note checking parents is insufficient:
504 # really branchheads. Note checking parents is insufficient:
506 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
505 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
507 for branch, newnodes in newbranches.iteritems():
506 for branch, newnodes in newbranches.iteritems():
508 bheads = partial.setdefault(branch, [])
507 bheads = partial.setdefault(branch, [])
509 bheads.extend(newnodes)
508 bheads.extend(newnodes)
510 if len(bheads) <= 1:
509 if len(bheads) <= 1:
511 continue
510 continue
512 bheads = sorted(bheads, key=lambda x: self[x].rev())
511 bheads = sorted(bheads, key=lambda x: self[x].rev())
513 # starting from tip means fewer passes over reachable
512 # starting from tip means fewer passes over reachable
514 while newnodes:
513 while newnodes:
515 latest = newnodes.pop()
514 latest = newnodes.pop()
516 if latest not in bheads:
515 if latest not in bheads:
517 continue
516 continue
518 minbhrev = self[bheads[0]].node()
517 minbhrev = self[bheads[0]].node()
519 reachable = self.changelog.reachable(latest, minbhrev)
518 reachable = self.changelog.reachable(latest, minbhrev)
520 reachable.remove(latest)
519 reachable.remove(latest)
521 if reachable:
520 if reachable:
522 bheads = [b for b in bheads if b not in reachable]
521 bheads = [b for b in bheads if b not in reachable]
523 partial[branch] = bheads
522 partial[branch] = bheads
524
523
525 def lookup(self, key):
524 def lookup(self, key):
526 if isinstance(key, int):
525 if isinstance(key, int):
527 return self.changelog.node(key)
526 return self.changelog.node(key)
528 elif key == '.':
527 elif key == '.':
529 return self.dirstate.p1()
528 return self.dirstate.p1()
530 elif key == 'null':
529 elif key == 'null':
531 return nullid
530 return nullid
532 elif key == 'tip':
531 elif key == 'tip':
533 return self.changelog.tip()
532 return self.changelog.tip()
534 n = self.changelog._match(key)
533 n = self.changelog._match(key)
535 if n:
534 if n:
536 return n
535 return n
537 if key in self._bookmarks:
536 if key in self._bookmarks:
538 return self._bookmarks[key]
537 return self._bookmarks[key]
539 if key in self.tags():
538 if key in self.tags():
540 return self.tags()[key]
539 return self.tags()[key]
541 if key in self.branchtags():
540 if key in self.branchtags():
542 return self.branchtags()[key]
541 return self.branchtags()[key]
543 n = self.changelog._partialmatch(key)
542 n = self.changelog._partialmatch(key)
544 if n:
543 if n:
545 return n
544 return n
546
545
547 # can't find key, check if it might have come from damaged dirstate
546 # can't find key, check if it might have come from damaged dirstate
548 if key in self.dirstate.parents():
547 if key in self.dirstate.parents():
549 raise error.Abort(_("working directory has unknown parent '%s'!")
548 raise error.Abort(_("working directory has unknown parent '%s'!")
550 % short(key))
549 % short(key))
551 try:
550 try:
552 if len(key) == 20:
551 if len(key) == 20:
553 key = hex(key)
552 key = hex(key)
554 except TypeError:
553 except TypeError:
555 pass
554 pass
556 raise error.RepoLookupError(_("unknown revision '%s'") % key)
555 raise error.RepoLookupError(_("unknown revision '%s'") % key)
557
556
558 def lookupbranch(self, key, remote=None):
557 def lookupbranch(self, key, remote=None):
559 repo = remote or self
558 repo = remote or self
560 if key in repo.branchmap():
559 if key in repo.branchmap():
561 return key
560 return key
562
561
563 repo = (remote and remote.local()) and remote or self
562 repo = (remote and remote.local()) and remote or self
564 return repo[key].branch()
563 return repo[key].branch()
565
564
566 def known(self, nodes):
565 def known(self, nodes):
567 nm = self.changelog.nodemap
566 nm = self.changelog.nodemap
568 return [(n in nm) for n in nodes]
567 return [(n in nm) for n in nodes]
569
568
570 def local(self):
569 def local(self):
571 return True
570 return True
572
571
573 def join(self, f):
572 def join(self, f):
574 return os.path.join(self.path, f)
573 return os.path.join(self.path, f)
575
574
576 def wjoin(self, f):
575 def wjoin(self, f):
577 return os.path.join(self.root, f)
576 return os.path.join(self.root, f)
578
577
579 def file(self, f):
578 def file(self, f):
580 if f[0] == '/':
579 if f[0] == '/':
581 f = f[1:]
580 f = f[1:]
582 return filelog.filelog(self.sopener, f)
581 return filelog.filelog(self.sopener, f)
583
582
584 def changectx(self, changeid):
583 def changectx(self, changeid):
585 return self[changeid]
584 return self[changeid]
586
585
587 def parents(self, changeid=None):
586 def parents(self, changeid=None):
588 '''get list of changectxs for parents of changeid'''
587 '''get list of changectxs for parents of changeid'''
589 return self[changeid].parents()
588 return self[changeid].parents()
590
589
591 def filectx(self, path, changeid=None, fileid=None):
590 def filectx(self, path, changeid=None, fileid=None):
592 """changeid can be a changeset revision, node, or tag.
591 """changeid can be a changeset revision, node, or tag.
593 fileid can be a file revision or node."""
592 fileid can be a file revision or node."""
594 return context.filectx(self, path, changeid, fileid)
593 return context.filectx(self, path, changeid, fileid)
595
594
596 def getcwd(self):
595 def getcwd(self):
597 return self.dirstate.getcwd()
596 return self.dirstate.getcwd()
598
597
599 def pathto(self, f, cwd=None):
598 def pathto(self, f, cwd=None):
600 return self.dirstate.pathto(f, cwd)
599 return self.dirstate.pathto(f, cwd)
601
600
602 def wfile(self, f, mode='r'):
601 def wfile(self, f, mode='r'):
603 return self.wopener(f, mode)
602 return self.wopener(f, mode)
604
603
605 def _link(self, f):
604 def _link(self, f):
606 return os.path.islink(self.wjoin(f))
605 return os.path.islink(self.wjoin(f))
607
606
608 def _loadfilter(self, filter):
607 def _loadfilter(self, filter):
609 if filter not in self.filterpats:
608 if filter not in self.filterpats:
610 l = []
609 l = []
611 for pat, cmd in self.ui.configitems(filter):
610 for pat, cmd in self.ui.configitems(filter):
612 if cmd == '!':
611 if cmd == '!':
613 continue
612 continue
614 mf = matchmod.match(self.root, '', [pat])
613 mf = matchmod.match(self.root, '', [pat])
615 fn = None
614 fn = None
616 params = cmd
615 params = cmd
617 for name, filterfn in self._datafilters.iteritems():
616 for name, filterfn in self._datafilters.iteritems():
618 if cmd.startswith(name):
617 if cmd.startswith(name):
619 fn = filterfn
618 fn = filterfn
620 params = cmd[len(name):].lstrip()
619 params = cmd[len(name):].lstrip()
621 break
620 break
622 if not fn:
621 if not fn:
623 fn = lambda s, c, **kwargs: util.filter(s, c)
622 fn = lambda s, c, **kwargs: util.filter(s, c)
624 # Wrap old filters not supporting keyword arguments
623 # Wrap old filters not supporting keyword arguments
625 if not inspect.getargspec(fn)[2]:
624 if not inspect.getargspec(fn)[2]:
626 oldfn = fn
625 oldfn = fn
627 fn = lambda s, c, **kwargs: oldfn(s, c)
626 fn = lambda s, c, **kwargs: oldfn(s, c)
628 l.append((mf, fn, params))
627 l.append((mf, fn, params))
629 self.filterpats[filter] = l
628 self.filterpats[filter] = l
630 return self.filterpats[filter]
629 return self.filterpats[filter]
631
630
632 def _filter(self, filterpats, filename, data):
631 def _filter(self, filterpats, filename, data):
633 for mf, fn, cmd in filterpats:
632 for mf, fn, cmd in filterpats:
634 if mf(filename):
633 if mf(filename):
635 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
634 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
636 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
635 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
637 break
636 break
638
637
639 return data
638 return data
640
639
641 @propertycache
640 @propertycache
642 def _encodefilterpats(self):
641 def _encodefilterpats(self):
643 return self._loadfilter('encode')
642 return self._loadfilter('encode')
644
643
645 @propertycache
644 @propertycache
646 def _decodefilterpats(self):
645 def _decodefilterpats(self):
647 return self._loadfilter('decode')
646 return self._loadfilter('decode')
648
647
649 def adddatafilter(self, name, filter):
648 def adddatafilter(self, name, filter):
650 self._datafilters[name] = filter
649 self._datafilters[name] = filter
651
650
652 def wread(self, filename):
651 def wread(self, filename):
653 if self._link(filename):
652 if self._link(filename):
654 data = os.readlink(self.wjoin(filename))
653 data = os.readlink(self.wjoin(filename))
655 else:
654 else:
656 data = self.wopener.read(filename)
655 data = self.wopener.read(filename)
657 return self._filter(self._encodefilterpats, filename, data)
656 return self._filter(self._encodefilterpats, filename, data)
658
657
659 def wwrite(self, filename, data, flags):
658 def wwrite(self, filename, data, flags):
660 data = self._filter(self._decodefilterpats, filename, data)
659 data = self._filter(self._decodefilterpats, filename, data)
661 if 'l' in flags:
660 if 'l' in flags:
662 self.wopener.symlink(data, filename)
661 self.wopener.symlink(data, filename)
663 else:
662 else:
664 self.wopener.write(filename, data)
663 self.wopener.write(filename, data)
665 if 'x' in flags:
664 if 'x' in flags:
666 util.setflags(self.wjoin(filename), False, True)
665 util.setflags(self.wjoin(filename), False, True)
667
666
668 def wwritedata(self, filename, data):
667 def wwritedata(self, filename, data):
669 return self._filter(self._decodefilterpats, filename, data)
668 return self._filter(self._decodefilterpats, filename, data)
670
669
671 def transaction(self, desc):
670 def transaction(self, desc):
672 tr = self._transref and self._transref() or None
671 tr = self._transref and self._transref() or None
673 if tr and tr.running():
672 if tr and tr.running():
674 return tr.nest()
673 return tr.nest()
675
674
676 # abort here if the journal already exists
675 # abort here if the journal already exists
677 if os.path.exists(self.sjoin("journal")):
676 if os.path.exists(self.sjoin("journal")):
678 raise error.RepoError(
677 raise error.RepoError(
679 _("abandoned transaction found - run hg recover"))
678 _("abandoned transaction found - run hg recover"))
680
679
681 journalfiles = self._writejournal(desc)
680 journalfiles = self._writejournal(desc)
682 renames = [(x, undoname(x)) for x in journalfiles]
681 renames = [(x, undoname(x)) for x in journalfiles]
683
682
684 tr = transaction.transaction(self.ui.warn, self.sopener,
683 tr = transaction.transaction(self.ui.warn, self.sopener,
685 self.sjoin("journal"),
684 self.sjoin("journal"),
686 aftertrans(renames),
685 aftertrans(renames),
687 self.store.createmode)
686 self.store.createmode)
688 self._transref = weakref.ref(tr)
687 self._transref = weakref.ref(tr)
689 return tr
688 return tr
690
689
691 def _writejournal(self, desc):
690 def _writejournal(self, desc):
692 # save dirstate for rollback
691 # save dirstate for rollback
693 try:
692 try:
694 ds = self.opener.read("dirstate")
693 ds = self.opener.read("dirstate")
695 except IOError:
694 except IOError:
696 ds = ""
695 ds = ""
697 self.opener.write("journal.dirstate", ds)
696 self.opener.write("journal.dirstate", ds)
698 self.opener.write("journal.branch",
697 self.opener.write("journal.branch",
699 encoding.fromlocal(self.dirstate.branch()))
698 encoding.fromlocal(self.dirstate.branch()))
700 self.opener.write("journal.desc",
699 self.opener.write("journal.desc",
701 "%d\n%s\n" % (len(self), desc))
700 "%d\n%s\n" % (len(self), desc))
702
701
703 bkname = self.join('bookmarks')
702 bkname = self.join('bookmarks')
704 if os.path.exists(bkname):
703 if os.path.exists(bkname):
705 util.copyfile(bkname, self.join('journal.bookmarks'))
704 util.copyfile(bkname, self.join('journal.bookmarks'))
706 else:
705 else:
707 self.opener.write('journal.bookmarks', '')
706 self.opener.write('journal.bookmarks', '')
708
707
709 return (self.sjoin('journal'), self.join('journal.dirstate'),
708 return (self.sjoin('journal'), self.join('journal.dirstate'),
710 self.join('journal.branch'), self.join('journal.desc'),
709 self.join('journal.branch'), self.join('journal.desc'),
711 self.join('journal.bookmarks'))
710 self.join('journal.bookmarks'))
712
711
713 def recover(self):
712 def recover(self):
714 lock = self.lock()
713 lock = self.lock()
715 try:
714 try:
716 if os.path.exists(self.sjoin("journal")):
715 if os.path.exists(self.sjoin("journal")):
717 self.ui.status(_("rolling back interrupted transaction\n"))
716 self.ui.status(_("rolling back interrupted transaction\n"))
718 transaction.rollback(self.sopener, self.sjoin("journal"),
717 transaction.rollback(self.sopener, self.sjoin("journal"),
719 self.ui.warn)
718 self.ui.warn)
720 self.invalidate()
719 self.invalidate()
721 return True
720 return True
722 else:
721 else:
723 self.ui.warn(_("no interrupted transaction available\n"))
722 self.ui.warn(_("no interrupted transaction available\n"))
724 return False
723 return False
725 finally:
724 finally:
726 lock.release()
725 lock.release()
727
726
728 def rollback(self, dryrun=False):
727 def rollback(self, dryrun=False):
729 wlock = lock = None
728 wlock = lock = None
730 try:
729 try:
731 wlock = self.wlock()
730 wlock = self.wlock()
732 lock = self.lock()
731 lock = self.lock()
733 if os.path.exists(self.sjoin("undo")):
732 if os.path.exists(self.sjoin("undo")):
734 try:
733 try:
735 args = self.opener.read("undo.desc").splitlines()
734 args = self.opener.read("undo.desc").splitlines()
736 if len(args) >= 3 and self.ui.verbose:
735 if len(args) >= 3 and self.ui.verbose:
737 desc = _("repository tip rolled back to revision %s"
736 desc = _("repository tip rolled back to revision %s"
738 " (undo %s: %s)\n") % (
737 " (undo %s: %s)\n") % (
739 int(args[0]) - 1, args[1], args[2])
738 int(args[0]) - 1, args[1], args[2])
740 elif len(args) >= 2:
739 elif len(args) >= 2:
741 desc = _("repository tip rolled back to revision %s"
740 desc = _("repository tip rolled back to revision %s"
742 " (undo %s)\n") % (
741 " (undo %s)\n") % (
743 int(args[0]) - 1, args[1])
742 int(args[0]) - 1, args[1])
744 except IOError:
743 except IOError:
745 desc = _("rolling back unknown transaction\n")
744 desc = _("rolling back unknown transaction\n")
746 self.ui.status(desc)
745 self.ui.status(desc)
747 if dryrun:
746 if dryrun:
748 return
747 return
749 transaction.rollback(self.sopener, self.sjoin("undo"),
748 transaction.rollback(self.sopener, self.sjoin("undo"),
750 self.ui.warn)
749 self.ui.warn)
751 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
750 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
752 if os.path.exists(self.join('undo.bookmarks')):
751 if os.path.exists(self.join('undo.bookmarks')):
753 util.rename(self.join('undo.bookmarks'),
752 util.rename(self.join('undo.bookmarks'),
754 self.join('bookmarks'))
753 self.join('bookmarks'))
755 try:
754 try:
756 branch = self.opener.read("undo.branch")
755 branch = self.opener.read("undo.branch")
757 self.dirstate.setbranch(branch)
756 self.dirstate.setbranch(branch)
758 except IOError:
757 except IOError:
759 self.ui.warn(_("named branch could not be reset, "
758 self.ui.warn(_("named branch could not be reset, "
760 "current branch is still: %s\n")
759 "current branch is still: %s\n")
761 % self.dirstate.branch())
760 % self.dirstate.branch())
762 self.invalidate()
761 self.invalidate()
763 self.dirstate.invalidate()
762 self.dirstate.invalidate()
764 self.destroyed()
763 self.destroyed()
765 parents = tuple([p.rev() for p in self.parents()])
764 parents = tuple([p.rev() for p in self.parents()])
766 if len(parents) > 1:
765 if len(parents) > 1:
767 self.ui.status(_("working directory now based on "
766 self.ui.status(_("working directory now based on "
768 "revisions %d and %d\n") % parents)
767 "revisions %d and %d\n") % parents)
769 else:
768 else:
770 self.ui.status(_("working directory now based on "
769 self.ui.status(_("working directory now based on "
771 "revision %d\n") % parents)
770 "revision %d\n") % parents)
772 else:
771 else:
773 self.ui.warn(_("no rollback information available\n"))
772 self.ui.warn(_("no rollback information available\n"))
774 return 1
773 return 1
775 finally:
774 finally:
776 release(lock, wlock)
775 release(lock, wlock)
777
776
778 def invalidatecaches(self):
777 def invalidatecaches(self):
779 self._tags = None
778 self._tags = None
780 self._tagtypes = None
779 self._tagtypes = None
781 self.nodetagscache = None
780 self.nodetagscache = None
782 self._branchcache = None # in UTF-8
781 self._branchcache = None # in UTF-8
783 self._branchcachetip = None
782 self._branchcachetip = None
784
783
785 def invalidate(self):
784 def invalidate(self):
786 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
785 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
787 if a in self.__dict__:
786 if a in self.__dict__:
788 delattr(self, a)
787 delattr(self, a)
789 self.invalidatecaches()
788 self.invalidatecaches()
790
789
791 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
790 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
792 try:
791 try:
793 l = lock.lock(lockname, 0, releasefn, desc=desc)
792 l = lock.lock(lockname, 0, releasefn, desc=desc)
794 except error.LockHeld, inst:
793 except error.LockHeld, inst:
795 if not wait:
794 if not wait:
796 raise
795 raise
797 self.ui.warn(_("waiting for lock on %s held by %r\n") %
796 self.ui.warn(_("waiting for lock on %s held by %r\n") %
798 (desc, inst.locker))
797 (desc, inst.locker))
799 # default to 600 seconds timeout
798 # default to 600 seconds timeout
800 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
799 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
801 releasefn, desc=desc)
800 releasefn, desc=desc)
802 if acquirefn:
801 if acquirefn:
803 acquirefn()
802 acquirefn()
804 return l
803 return l
805
804
806 def lock(self, wait=True):
805 def lock(self, wait=True):
807 '''Lock the repository store (.hg/store) and return a weak reference
806 '''Lock the repository store (.hg/store) and return a weak reference
808 to the lock. Use this before modifying the store (e.g. committing or
807 to the lock. Use this before modifying the store (e.g. committing or
809 stripping). If you are opening a transaction, get a lock as well.)'''
808 stripping). If you are opening a transaction, get a lock as well.)'''
810 l = self._lockref and self._lockref()
809 l = self._lockref and self._lockref()
811 if l is not None and l.held:
810 if l is not None and l.held:
812 l.lock()
811 l.lock()
813 return l
812 return l
814
813
815 l = self._lock(self.sjoin("lock"), wait, self.store.write,
814 l = self._lock(self.sjoin("lock"), wait, self.store.write,
816 self.invalidate, _('repository %s') % self.origroot)
815 self.invalidate, _('repository %s') % self.origroot)
817 self._lockref = weakref.ref(l)
816 self._lockref = weakref.ref(l)
818 return l
817 return l
819
818
820 def wlock(self, wait=True):
819 def wlock(self, wait=True):
821 '''Lock the non-store parts of the repository (everything under
820 '''Lock the non-store parts of the repository (everything under
822 .hg except .hg/store) and return a weak reference to the lock.
821 .hg except .hg/store) and return a weak reference to the lock.
823 Use this before modifying files in .hg.'''
822 Use this before modifying files in .hg.'''
824 l = self._wlockref and self._wlockref()
823 l = self._wlockref and self._wlockref()
825 if l is not None and l.held:
824 if l is not None and l.held:
826 l.lock()
825 l.lock()
827 return l
826 return l
828
827
829 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
828 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
830 self.dirstate.invalidate, _('working directory of %s') %
829 self.dirstate.invalidate, _('working directory of %s') %
831 self.origroot)
830 self.origroot)
832 self._wlockref = weakref.ref(l)
831 self._wlockref = weakref.ref(l)
833 return l
832 return l
834
833
835 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
834 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
836 """
835 """
837 commit an individual file as part of a larger transaction
836 commit an individual file as part of a larger transaction
838 """
837 """
839
838
840 fname = fctx.path()
839 fname = fctx.path()
841 text = fctx.data()
840 text = fctx.data()
842 flog = self.file(fname)
841 flog = self.file(fname)
843 fparent1 = manifest1.get(fname, nullid)
842 fparent1 = manifest1.get(fname, nullid)
844 fparent2 = fparent2o = manifest2.get(fname, nullid)
843 fparent2 = fparent2o = manifest2.get(fname, nullid)
845
844
846 meta = {}
845 meta = {}
847 copy = fctx.renamed()
846 copy = fctx.renamed()
848 if copy and copy[0] != fname:
847 if copy and copy[0] != fname:
849 # Mark the new revision of this file as a copy of another
848 # Mark the new revision of this file as a copy of another
850 # file. This copy data will effectively act as a parent
849 # file. This copy data will effectively act as a parent
851 # of this new revision. If this is a merge, the first
850 # of this new revision. If this is a merge, the first
852 # parent will be the nullid (meaning "look up the copy data")
851 # parent will be the nullid (meaning "look up the copy data")
853 # and the second one will be the other parent. For example:
852 # and the second one will be the other parent. For example:
854 #
853 #
855 # 0 --- 1 --- 3 rev1 changes file foo
854 # 0 --- 1 --- 3 rev1 changes file foo
856 # \ / rev2 renames foo to bar and changes it
855 # \ / rev2 renames foo to bar and changes it
857 # \- 2 -/ rev3 should have bar with all changes and
856 # \- 2 -/ rev3 should have bar with all changes and
858 # should record that bar descends from
857 # should record that bar descends from
859 # bar in rev2 and foo in rev1
858 # bar in rev2 and foo in rev1
860 #
859 #
861 # this allows this merge to succeed:
860 # this allows this merge to succeed:
862 #
861 #
863 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
862 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
864 # \ / merging rev3 and rev4 should use bar@rev2
863 # \ / merging rev3 and rev4 should use bar@rev2
865 # \- 2 --- 4 as the merge base
864 # \- 2 --- 4 as the merge base
866 #
865 #
867
866
868 cfname = copy[0]
867 cfname = copy[0]
869 crev = manifest1.get(cfname)
868 crev = manifest1.get(cfname)
870 newfparent = fparent2
869 newfparent = fparent2
871
870
872 if manifest2: # branch merge
871 if manifest2: # branch merge
873 if fparent2 == nullid or crev is None: # copied on remote side
872 if fparent2 == nullid or crev is None: # copied on remote side
874 if cfname in manifest2:
873 if cfname in manifest2:
875 crev = manifest2[cfname]
874 crev = manifest2[cfname]
876 newfparent = fparent1
875 newfparent = fparent1
877
876
878 # find source in nearest ancestor if we've lost track
877 # find source in nearest ancestor if we've lost track
879 if not crev:
878 if not crev:
880 self.ui.debug(" %s: searching for copy revision for %s\n" %
879 self.ui.debug(" %s: searching for copy revision for %s\n" %
881 (fname, cfname))
880 (fname, cfname))
882 for ancestor in self[None].ancestors():
881 for ancestor in self[None].ancestors():
883 if cfname in ancestor:
882 if cfname in ancestor:
884 crev = ancestor[cfname].filenode()
883 crev = ancestor[cfname].filenode()
885 break
884 break
886
885
887 if crev:
886 if crev:
888 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
887 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
889 meta["copy"] = cfname
888 meta["copy"] = cfname
890 meta["copyrev"] = hex(crev)
889 meta["copyrev"] = hex(crev)
891 fparent1, fparent2 = nullid, newfparent
890 fparent1, fparent2 = nullid, newfparent
892 else:
891 else:
893 self.ui.warn(_("warning: can't find ancestor for '%s' "
892 self.ui.warn(_("warning: can't find ancestor for '%s' "
894 "copied from '%s'!\n") % (fname, cfname))
893 "copied from '%s'!\n") % (fname, cfname))
895
894
896 elif fparent2 != nullid:
895 elif fparent2 != nullid:
897 # is one parent an ancestor of the other?
896 # is one parent an ancestor of the other?
898 fparentancestor = flog.ancestor(fparent1, fparent2)
897 fparentancestor = flog.ancestor(fparent1, fparent2)
899 if fparentancestor == fparent1:
898 if fparentancestor == fparent1:
900 fparent1, fparent2 = fparent2, nullid
899 fparent1, fparent2 = fparent2, nullid
901 elif fparentancestor == fparent2:
900 elif fparentancestor == fparent2:
902 fparent2 = nullid
901 fparent2 = nullid
903
902
904 # is the file changed?
903 # is the file changed?
905 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
904 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
906 changelist.append(fname)
905 changelist.append(fname)
907 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
906 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
908
907
909 # are just the flags changed during merge?
908 # are just the flags changed during merge?
910 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
909 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
911 changelist.append(fname)
910 changelist.append(fname)
912
911
913 return fparent1
912 return fparent1
914
913
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

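    # Illustrative usage sketch (not part of the original module; the
    # repository path and commit metadata below are assumptions). commit()
    # returns the new changeset node, or None when there was nothing to
    # commit:
    #
    #   from mercurial import ui as uimod, hg
    #   repo = hg.repository(uimod.ui(), '/path/to/repo')
    #   node = repo.commit(text='fix typo', user='alice <alice@example.com>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
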
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

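    # Illustrative sketch (an assumption, not part of the original file):
    # status() returns a 7-tuple of sorted filename lists, so a caller can
    # unpack it directly.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, ignored=True, clean=True)
    #   for f in modified:
    #       repo.ui.write('M %s\n' % f)
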
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

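    # Illustrative sketch (hypothetical caller, not part of the original
    # file): list the open heads of a named branch, newest first.
    #
    #   from mercurial.node import short
    #   for h in repo.branchheads('default', closed=False):
    #       repo.ui.write('%s\n' % short(h))
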
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

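    # between() walks the first-parent chain from each 'top' toward 'bottom'
    # and records the nodes at exponentially growing distances (1, 2, 4,
    # 8, ...); the old discovery protocol uses these samples to narrow down
    # the common ancestor. Hypothetical illustration (nodes are assumptions):
    #
    #   samples = repo.between([(tipnode, nullid)])
    #   # samples[0] holds the ancestors of tipnode at distances 1, 2, 4, ...
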
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

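    # Illustrative sketch (assumption, not in the original file): pulling
    # from another repository object, as 'hg pull' does internally.
    #
    #   from mercurial import ui as uimod, hg
    #   dst = hg.repository(uimod.ui(), '/path/to/local')
    #   src = hg.repository(dst.ui, '/path/to/other')
    #   dst.pull(src)          # fetch everything the source has
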
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

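    # Illustrative sketch (hypothetical, not part of the original file):
    # interpreting the integer that push() returns.
    #
    #   r = repo.push(other, newbranch=True)
    #   if r == 0:
    #       repo.ui.warn('push failed or nothing to push\n')
    #   elif r == 1:
    #       repo.ui.status('pushed; remote head count unchanged\n')
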
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

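    # Illustrative sketch (assumption, not in the original file): building
    # a bundle of everything the local repository has on top of a known
    # base node; 'basenode' is hypothetical.
    #
    #   cg = repo.getbundle('bundle', common=[basenode])
    #   if cg is None:
    #       repo.ui.status('nothing to bundle\n')
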
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            for n in missing:
                if revlog.linkrev(revlog.rev(n)) not in commonrevs:
                    yield n

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})
                first = True

                for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
                                              bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                first = True
                for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

1681 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1680 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1682 """Add the changegroup returned by source.read() to this repo.
1681 """Add the changegroup returned by source.read() to this repo.
1683 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1682 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1684 the URL of the repo where this changegroup is coming from.
1683 the URL of the repo where this changegroup is coming from.
1685 If lock is not None, the function takes ownership of the lock
1684 If lock is not None, the function takes ownership of the lock
1686 and releases it after the changegroup is added.
1685 and releases it after the changegroup is added.
1687
1686
1688 Return an integer summarizing the change to this repo:
1687 Return an integer summarizing the change to this repo:
1689 - nothing changed or no source: 0
1688 - nothing changed or no source: 0
1690 - more heads than before: 1+added heads (2..n)
1689 - more heads than before: 1+added heads (2..n)
1691 - fewer heads than before: -1-removed heads (-2..-n)
1690 - fewer heads than before: -1-removed heads (-2..-n)
1692 - number of heads stays the same: 1
1691 - number of heads stays the same: 1
1693 """
1692 """
1694 def csmap(x):
1693 def csmap(x):
1695 self.ui.debug("add changeset %s\n" % short(x))
1694 self.ui.debug("add changeset %s\n" % short(x))
1696 return len(cl)
1695 return len(cl)
1697
1696
1698 def revmap(x):
1697 def revmap(x):
1699 return cl.rev(x)
1698 return cl.rev(x)
1700
1699
1701 if not source:
1700 if not source:
1702 return 0
1701 return 0
1703
1702
1704 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1703 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1705
1704
1706 changesets = files = revisions = 0
1705 changesets = files = revisions = 0
1707 efiles = set()
1706 efiles = set()
1708
1707
1709 # write changelog data to temp files so concurrent readers will not see
1708 # write changelog data to temp files so concurrent readers will not see
1710 # inconsistent view
1709 # inconsistent view
1711 cl = self.changelog
1710 cl = self.changelog
1712 cl.delayupdate()
1711 cl.delayupdate()
1713 oldheads = cl.heads()
1712 oldheads = cl.heads()
1714
1713
1715 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1714 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1716 try:
1715 try:
1717 trp = weakref.proxy(tr)
1716 trp = weakref.proxy(tr)
1718 # pull off the changeset group
1717 # pull off the changeset group
1719 self.ui.status(_("adding changesets\n"))
1718 self.ui.status(_("adding changesets\n"))
1720 clstart = len(cl)
1719 clstart = len(cl)
1721 class prog(object):
1720 class prog(object):
1722 step = _('changesets')
1721 step = _('changesets')
1723 count = 1
1722 count = 1
1724 ui = self.ui
1723 ui = self.ui
1725 total = None
1724 total = None
1726 def __call__(self):
1725 def __call__(self):
1727 self.ui.progress(self.step, self.count, unit=_('chunks'),
1726 self.ui.progress(self.step, self.count, unit=_('chunks'),
1728 total=self.total)
1727 total=self.total)
1729 self.count += 1
1728 self.count += 1
1730 pr = prog()
1729 pr = prog()
1731 source.callback = pr
1730 source.callback = pr
1732
1731
1733 source.changelogheader()
1732 source.changelogheader()
1734 if (cl.addgroup(source, csmap, trp) is None
1733 if (cl.addgroup(source, csmap, trp) is None
1735 and not emptyok):
1734 and not emptyok):
1736 raise util.Abort(_("received changelog group is empty"))
1735 raise util.Abort(_("received changelog group is empty"))
1737 clend = len(cl)
1736 clend = len(cl)
1738 changesets = clend - clstart
1737 changesets = clend - clstart
1739 for c in xrange(clstart, clend):
1738 for c in xrange(clstart, clend):
1740 efiles.update(self[c].files())
1739 efiles.update(self[c].files())
1741 efiles = len(efiles)
1740 efiles = len(efiles)
1742 self.ui.progress(_('changesets'), None)
1741 self.ui.progress(_('changesets'), None)
1743
1742
1744 # pull off the manifest group
1743 # pull off the manifest group
1745 self.ui.status(_("adding manifests\n"))
1744 self.ui.status(_("adding manifests\n"))
1746 pr.step = _('manifests')
1745 pr.step = _('manifests')
1747 pr.count = 1
1746 pr.count = 1
1748 pr.total = changesets # manifests <= changesets
1747 pr.total = changesets # manifests <= changesets
1749 # no need to check for empty manifest group here:
1748 # no need to check for empty manifest group here:
1750 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1749 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1751 # no new manifest will be created and the manifest group will
1750 # no new manifest will be created and the manifest group will
1752 # be empty during the pull
1751 # be empty during the pull
1753 source.manifestheader()
1752 source.manifestheader()
1754 self.manifest.addgroup(source, revmap, trp)
1753 self.manifest.addgroup(source, revmap, trp)
1755 self.ui.progress(_('manifests'), None)
1754 self.ui.progress(_('manifests'), None)
1756
1755
1757 needfiles = {}
1756 needfiles = {}
1758 if self.ui.configbool('server', 'validate', default=False):
1757 if self.ui.configbool('server', 'validate', default=False):
1759 # validate incoming csets have their manifests
1758 # validate incoming csets have their manifests
1760 for cset in xrange(clstart, clend):
1759 for cset in xrange(clstart, clend):
1761 mfest = self.changelog.read(self.changelog.node(cset))[0]
1760 mfest = self.changelog.read(self.changelog.node(cset))[0]
1762 mfest = self.manifest.readdelta(mfest)
1761 mfest = self.manifest.readdelta(mfest)
1763 # store file nodes we must see
1762 # store file nodes we must see
1764 for f, n in mfest.iteritems():
1763 for f, n in mfest.iteritems():
1765 needfiles.setdefault(f, set()).add(n)
1764 needfiles.setdefault(f, set()).add(n)
1766
1765
1767 # process the files
1766 # process the files
1768 self.ui.status(_("adding file changes\n"))
1767 self.ui.status(_("adding file changes\n"))
1769 pr.step = 'files'
1768 pr.step = 'files'
1770 pr.count = 1
1769 pr.count = 1
1771 pr.total = efiles
1770 pr.total = efiles
1772 source.callback = None
1771 source.callback = None
1773
1772
1774 while 1:
1773 while 1:
1775 chunkdata = source.filelogheader()
1774 chunkdata = source.filelogheader()
1776 if not chunkdata:
1775 if not chunkdata:
1777 break
1776 break
1778 f = chunkdata["filename"]
1777 f = chunkdata["filename"]
1779 self.ui.debug("adding %s revisions\n" % f)
1778 self.ui.debug("adding %s revisions\n" % f)
1780 pr()
1779 pr()
1781 fl = self.file(f)
1780 fl = self.file(f)
1782 o = len(fl)
1781 o = len(fl)
1783 if fl.addgroup(source, revmap, trp) is None:
1782 if fl.addgroup(source, revmap, trp) is None:
1784 raise util.Abort(_("received file revlog group is empty"))
1783 raise util.Abort(_("received file revlog group is empty"))
1785 revisions += len(fl) - o
1784 revisions += len(fl) - o
1786 files += 1
1785 files += 1
1787 if f in needfiles:
1786 if f in needfiles:
1788 needs = needfiles[f]
1787 needs = needfiles[f]
1789 for new in xrange(o, len(fl)):
1788 for new in xrange(o, len(fl)):
1790 n = fl.node(new)
1789 n = fl.node(new)
1791 if n in needs:
1790 if n in needs:
1792 needs.remove(n)
1791 needs.remove(n)
1793 if not needs:
1792 if not needs:
1794 del needfiles[f]
1793 del needfiles[f]
1795 self.ui.progress(_('files'), None)
1794 self.ui.progress(_('files'), None)
1796
1795
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

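    # The return value above is never 0: it is dh + 1 when heads were added
    # or unchanged, dh - 1 when the operation removed heads. A hypothetical
    # caller (sketch, not part of this module) could recover the head delta
    # like so:
    #
    #     heads_delta = ret - 1 if ret > 0 else ret + 1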
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

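    # Wire format consumed by stream_in above (as read back from
    # remote.stream_out()): one line with an integer response code, then a
    # "<total files> <total bytes>" line, then for each file a
    # "<name>\0<size>" header followed by exactly <size> bytes of raw
    # store data.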
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

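    # Example: a server advertising "streamreqs=revlogv1,generaldelta" can
    # be streamed from, since both entries are in our supportedformats; a
    # server requiring a format we do not know forces the pull path instead.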
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.localpath(path), create)

def islocal(path):
    return True
@@ -1,1272 +1,1271 b''
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

# import stuff from node for others to import from revlog
from node import bin, hex, nullid, nullrev, short #@UnusedImport
from i18n import _
import ancestor, mdiff, parsers, error, util
import struct, zlib, errno

_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1

# revlog header flags
REVLOGV0 = 0
REVLOGNG = 1
REVLOGNGINLINEDATA = (1 << 16)
REVLOGGENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA

# revlog index flags
REVIDX_KNOWN_FLAGS = 0

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

RevlogError = error.RevlogError
LookupError = error.LookupError

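# The version field stored in the first four bytes of an index combines the
# format number (low 16 bits) with feature flags (high 16 bits); for example
# REVLOG_DEFAULT_VERSION is REVLOGNG | REVLOGNGINLINEDATA == 0x10001.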
def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    return long(long(offset) << 16 | type)

56
56
57 def hash(text, p1, p2):
57 def hash(text, p1, p2):
58 """generate a hash from the given text and its parent hashes
58 """generate a hash from the given text and its parent hashes
59
59
60 This hash combines both the current file contents and its history
60 This hash combines both the current file contents and its history
61 in a manner that makes it easy to distinguish nodes with the same
61 in a manner that makes it easy to distinguish nodes with the same
62 content in the revision graph.
62 content in the revision graph.
63 """
63 """
64 # As of now, if one of the parent node is null, p2 is null
64 # As of now, if one of the parent node is null, p2 is null
65 if p2 == nullid:
65 if p2 == nullid:
66 # deep copy of a hash is faster than creating one
66 # deep copy of a hash is faster than creating one
67 s = nullhash.copy()
67 s = nullhash.copy()
68 s.update(p1)
68 s.update(p1)
69 else:
69 else:
70 # none of the parent nodes are nullid
70 # none of the parent nodes are nullid
71 l = [p1, p2]
71 l = [p1, p2]
72 l.sort()
72 l.sort()
73 s = _sha(l[0])
73 s = _sha(l[0])
74 s.update(l[1])
74 s.update(l[1])
75 s.update(text)
75 s.update(text)
76 return s.digest()
76 return s.digest()
77
77
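# For two real parents, hash() above is equivalent to this sketch:
#
#     s = _sha(min(p1, p2))
#     s.update(max(p1, p2))
#     s.update(text)
#     digest = s.digest()
#
# sorting the parents makes the result independent of parent order.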
def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text:
        return ("", text)
    l = len(text)
    bin = None
    if l < 44:
        pass
    elif l > 1000000:
        # zlib makes an internal copy, thus doubling memory usage for
        # large files, so lets do this in pieces
        z = zlib.compressobj()
        p = []
        pos = 0
        while pos < l:
            pos2 = pos + 2**20
            p.append(z.compress(text[pos:pos2]))
            pos = pos2
        p.append(z.flush())
        if sum(map(len, p)) < l:
            bin = "".join(p)
    else:
        bin = _compress(text)
    if bin is None or len(bin) > l:
        if text[0] == '\0':
            return ("", text)
        return ('u', text)
    return ("", bin)

def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    t = bin[0]
    if t == '\0':
        return bin
    if t == 'x':
        return _decompress(bin)
    if t == 'u':
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % t)

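# The first byte of a stored chunk doubles as the tag decompress() dispatches
# on: 'x' is the leading byte of a zlib stream, 'u' marks text stored
# uncompressed, and '\0' means the text itself starts with a NUL and was
# stored as-is.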
indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56

class revlogoldio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise RevlogError(_("index entry flags need RevlogNG"))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)

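# A v0 index record is struct.calcsize(">4l20s20s20s") == 76 bytes: four
# 32-bit ints (offset, length, base, linkrev) followed by the two parent
# hashes and the node hash, which is why v0shaoffset is 56 (16 + 2 * 20).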
# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32
versionformat = ">I"

class revlogio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, None, cache

    def packentry(self, entry, node, version, rev):
        p = _pack(indexformatng, *entry)
        if rev == 0:
            p = _pack(versionformat, version) + p[4:]
        return p

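# A RevlogNG record is struct.calcsize(">Qiiiiii20s12x") == 64 bytes. The
# version header shares space with the first record: packentry() splices the
# version word into the first 4 bytes of entry 0, which is safe because the
# offset stored there is always 0 for the first revision.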
class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        self._cache = None
        self._basecache = (0, 0)
        self._chunkcache = (0, '')
        self.index = []
        self._pcache = {}
        self._nodecache = {nullid: nullrev}
        self._nodepos = None

        v = REVLOG_DEFAULT_VERSION
        if hasattr(opener, 'options'):
-           if 'defversion' in opener.options:
-               v = opener.options['defversion']
-               if v & REVLOGNG:
-                   v |= REVLOGNGINLINEDATA
-               if v & REVLOGNG and 'generaldelta' in opener.options:
-                   v |= REVLOGGENERALDELTA
+           if 'revlogv1' in opener.options:
+               if 'generaldelta' in opener.options:
+                   v |= REVLOGGENERALDELTA
+           else:
+               v = 0

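        # When the opener publishes options, they are now plain flags
        # instead of a raw defversion number: 'revlogv1' keeps the default
        # RevlogNG version (inline data included) and may be combined with
        # 'generaldelta'; without 'revlogv1' the revlog is created as format
        # v0. A pre-existing index header still overrides v below.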
        i = ''
        try:
            f = self.opener(self.indexfile)
            i = f.read()
            f.close()
            if len(i) > 0:
                v = struct.unpack(versionformat, i[:4])[0]
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        self._generaldelta = v & REVLOGGENERALDELTA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(i, self._inline)
        except (ValueError, IndexError):
            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()

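    # The parsed index always carries one extra sentinel tuple for the null
    # revision (appended by the parsers), so len(self) is len(self.index) - 1
    # and the tip lives at index position len(self.index) - 2.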
    def tip(self):
        return self.node(len(self.index) - 2)
    def __len__(self):
        return len(self.index) - 1
    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    @util.propertycache
    def nodemap(self):
        self.rev(self.node(0))
        return self._nodecache

    def rev(self, node):
        try:
            return self._nodecache[node]
        except KeyError:
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 2
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            raise LookupError(node, self.indexfile, _('no node'))

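    # rev() above fills _nodecache lazily: on a miss it scans the index
    # backwards from the last unvisited position, memoizing every node it
    # passes, so repeated lookups never rescan the same region.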
    def node(self, rev):
        return self.index[rev][7]
    def linkrev(self, rev):
        return self.index[rev][4]
    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
    def parentrevs(self, rev):
        return self.index[rev][5:7]
    def start(self, rev):
        return int(self.index[rev][0] >> 16)
    def end(self, rev):
        return self.start(rev) + self.length(rev)
    def length(self, rev):
        return self.index[rev][1]
    def base(self, rev):
        return self.index[rev][3]
    def chainbase(self, rev):
        index = self.index
        base = index[rev][3]
        while base != rev:
            rev = base
            base = index[rev][3]
        return base
    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF
    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(self.node(rev))
        return len(t)
    size = rawsize

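    # The accessors above read the RevlogNG index tuple by position:
    # e[0] packed offset/flags, e[1] compressed length, e[2] uncompressed
    # length, e[3] delta base, e[4] linkrev, e[5]/e[6] parent revs,
    # e[7] node hash.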
    def reachable(self, node, stop=None):
        """return the set of all nodes ancestral to a given node, including
        the node itself, stopping when stop is matched"""
        reachable = set((node,))
        visit = [node]
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable.add(p)
                    visit.append(p)
        return reachable

    def ancestors(self, *revs):
        """Generate the ancestors of 'revs' in reverse topological order.

        Yield a sequence of revision numbers starting with the parents
        of each revision in revs, i.e., each revision is *not* considered
        an ancestor of itself. Results are in breadth-first order:
        parents of each rev in revs, then parents of those, etc. Result
        does not include the null revision."""
        visit = list(revs)
        seen = set([nullrev])
        while visit:
            for parent in self.parentrevs(visit.pop(0)):
                if parent not in seen:
                    visit.append(parent)
                    seen.add(parent)
                    yield parent

    def descendants(self, *revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in xrange(first + 1, len(self)):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

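    # For a linear history 0 <- 1 <- 2, ancestors(2) yields 1 then 0, and
    # descendants(0) yields 1 then 2; neither generator includes the
    # starting revisions themselves.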
    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common.

        More specifically, the second element is a list of nodes N such that
        every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        has = set(self.ancestors(*common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = [r for r in heads if r not in has]
        while visit:
            r = visit.pop(0)
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(r) for r in missing]

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        _common, missing = self.findcommonmissing(common, heads)
        return missing

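    # findmissing() is the discovery primitive behind pull and push: given
    # the nodes both sides are known to share as 'common', it returns
    # exactly the changesets that would need to be transferred.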
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendents = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descedents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == nullrev: # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked is descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents. (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents.add(n)
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

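    # headrevs() below computes heads in a single pass: every revision
    # starts out marked as a head and is unmarked as soon as it appears as
    # somebody's parent.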
    def headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        ishead = [1] * (count + 1)
        index = self.index
        for r in xrange(count):
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0
        return [r for r in xrange(count) if ishead[r]]

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = set((startrev,))
        heads = set((startrev,))

        parentrevs = self.parentrevs
        for r in xrange(startrev + 1, len(self)):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                if p in heads and p not in stoprevs:
                    heads.remove(p)

        return [self.node(r) for r in heads]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, len(self)):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def descendant(self, start, end):
        if start == nullrev:
            return True
        for i in self.descendants(start):
            if i == end:
                return True
            elif i > end:
                break
        return False

    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        # fast path, check if it is a descendant
        a, b = self.rev(a), self.rev(b)
        start, end = sorted((a, b))
        if self.descendant(start, end):
            return self.node(start)

        def parents(rev):
            return [p for p in self.parentrevs(rev) if p != nullrev]

        c = ancestor.ancestor(a, b, parents)
        if c is None:
            return nullid

        return self.node(c)

709 def _match(self, id):
708 def _match(self, id):
710 if isinstance(id, (long, int)):
709 if isinstance(id, (long, int)):
711 # rev
710 # rev
712 return self.node(id)
711 return self.node(id)
713 if len(id) == 20:
712 if len(id) == 20:
714 # possibly a binary node
713 # possibly a binary node
715 # odds of a binary node being all hex in ASCII are 1 in 10**25
714 # odds of a binary node being all hex in ASCII are 1 in 10**25
716 try:
715 try:
717 node = id
716 node = id
718 self.rev(node) # quick search the index
717 self.rev(node) # quick search the index
719 return node
718 return node
720 except LookupError:
719 except LookupError:
721 pass # may be partial hex id
720 pass # may be partial hex id
722 try:
721 try:
723 # str(rev)
722 # str(rev)
724 rev = int(id)
723 rev = int(id)
725 if str(rev) != id:
724 if str(rev) != id:
726 raise ValueError
725 raise ValueError
727 if rev < 0:
726 if rev < 0:
728 rev = len(self) + rev
727 rev = len(self) + rev
729 if rev < 0 or rev >= len(self):
728 if rev < 0 or rev >= len(self):
730 raise ValueError
729 raise ValueError
731 return self.node(rev)
730 return self.node(rev)
732 except (ValueError, OverflowError):
731 except (ValueError, OverflowError):
733 pass
732 pass
734 if len(id) == 40:
733 if len(id) == 40:
735 try:
734 try:
736 # a full hex nodeid?
735 # a full hex nodeid?
737 node = bin(id)
736 node = bin(id)
738 self.rev(node)
737 self.rev(node)
739 return node
738 return node
740 except (TypeError, LookupError):
739 except (TypeError, LookupError):
741 pass
740 pass
742
741
743 def _partialmatch(self, id):
742 def _partialmatch(self, id):
744 if id in self._pcache:
743 if id in self._pcache:
745 return self._pcache[id]
744 return self._pcache[id]
746
745
747 if len(id) < 40:
746 if len(id) < 40:
748 try:
747 try:
749 # hex(node)[:...]
748 # hex(node)[:...]
750 l = len(id) // 2 # grab an even number of digits
749 l = len(id) // 2 # grab an even number of digits
751 prefix = bin(id[:l * 2])
750 prefix = bin(id[:l * 2])
752 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
751 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
753 nl = [n for n in nl if hex(n).startswith(id)]
752 nl = [n for n in nl if hex(n).startswith(id)]
754 if len(nl) > 0:
753 if len(nl) > 0:
755 if len(nl) == 1:
754 if len(nl) == 1:
756 self._pcache[id] = nl[0]
755 self._pcache[id] = nl[0]
757 return nl[0]
756 return nl[0]
758 raise LookupError(id, self.indexfile,
757 raise LookupError(id, self.indexfile,
759 _('ambiguous identifier'))
758 _('ambiguous identifier'))
760 return None
759 return None
761 except TypeError:
760 except TypeError:
762 pass
761 pass
763
762
    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

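    # Editor's sketch (hypothetical revlog "rl"): lookup() chains the
    # two matchers above, so a caller can pass any supported form:
    #
    #     node = rl.lookup('3')        # revision number as a string
    #     node = rl.lookup('1e4f2a')   # unambiguous hex prefix
    #
    # An ambiguous prefix propagates _partialmatch's LookupError with
    # 'ambiguous identifier' instead of picking an arbitrary candidate;
    # an unknown id raises 'no match found'.
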
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def _addchunk(self, offset, data):
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _loadchunk(self, offset, length):
        if self._inline:
            df = self.opener(self.indexfile)
        else:
            df = self.opener(self.datafile)

        readahead = max(65536, length)
        df.seek(offset)
        d = df.read(readahead)
        self._addchunk(offset, d)
        if readahead > length:
            return d[:length]
        return d

    def _getchunk(self, offset, length):
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return d[cachestart:cacheend]

        return self._loadchunk(offset, length)

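    # Editor's sketch of the cache above (made-up offsets): _loadchunk
    # reads at least 64kB at a time, so a sequential scan touches the
    # file once per window rather than once per revision:
    #
    #     rl._getchunk(0, 100)       # miss: reads 65536 bytes at offset 0
    #     rl._getchunk(100, 200)     # hit: sliced out of self._chunkcache
    #     rl._getchunk(70000, 50)    # miss: loads a fresh window
    #
    # _addchunk grows the cached window in place only when the new read
    # is contiguous with it and the total stays below _chunksize.
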
    def _chunkraw(self, startrev, endrev):
        start = self.start(startrev)
        length = self.end(endrev) - start
        if self._inline:
            start += (startrev + 1) * self._io.size
        return self._getchunk(start, length)

    def _chunk(self, rev):
        return decompress(self._chunkraw(rev, rev))

    def _chunkbase(self, rev):
        return self._chunk(rev)

    def _chunkclear(self):
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

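    # Editor's note (illustrative): index slot 3 holds the delta base.
    # In a classic revlog deltas always chain against rev - 1 and slot 3
    # only records where the chain starts; with generaldelta it is the
    # real delta parent.  For an entry at rev 8 whose base field is 5:
    #
    #     classic revlog:  deltaparent(8) == 7
    #     generaldelta:    deltaparent(8) == 5
    #     base == rev:     deltaparent(8) == nullrev  (full snapshot)
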
    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return self._chunk(rev2)

        return mdiff.textdiff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        cachedrev = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                return self._cache[2]
            cachedrev = self._cache[1]

        # look up what we need to read
        text = None
        rev = self.rev(node)

        # check rev flags
        if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

        # build delta chain
        chain = []
        index = self.index # for performance
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != cachedrev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]
        chain.reverse()
        base = iterrev

        if iterrev == cachedrev:
            # cache hit
            text = self._cache[2]

        # drop cache to save memory
        self._cache = None

        self._chunkraw(base, rev)
        if text is None:
            text = self._chunkbase(base)

        bins = [self._chunk(r) for r in chain]
        text = mdiff.patches(text, bins)

        text = self._checkhash(text, node, rev)

        self._cache = (node, rev, text)
        return text

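    # Editor's sketch of the reconstruction above (classic revlog, no
    # warm cache, chain base at rev 4 for a lookup of rev 7):
    #
    #     chain == [5, 6, 7]          # collected newest-first, reversed
    #     text = <full text of rev 4>
    #     text = mdiff.patches(text, [delta(5), delta(6), delta(7)])
    #
    # When self._cache still holds a revision inside the chain, the walk
    # stops there and patching resumes from the cached text rather than
    # from the chain base.
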
    def _checkhash(self, text, node, rev):
        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.indexfile, rev))
        return text

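    # Editor's sketch (assumption: this mirrors revlog's module-level
    # hash(), which SHA-1s the two parent nodeids in sorted order
    # followed by the text).  A standalone stdlib equivalent:
    #
    #     import hashlib
    #     def nodeid(text, p1, p2):
    #         s = hashlib.sha1(min(p1, p2))
    #         s.update(max(p1, p2))
    #         s.update(text)
    #         return s.digest()
    #
    # Mixing the parents into the hash means _checkhash catches not only
    # corrupted text but also a revision attached to the wrong parents.
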
    def checkinlinesize(self, tr, fp=None):
        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        df = self.opener(self.datafile, 'w')
        try:
            for r in self:
                df.write(self._chunkraw(r, r))
        finally:
            df.close()

        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

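    # Editor's note (illustrative layout): an inline revlog interleaves
    # index entries and data chunks in the .i file; once it outgrows
    # _maxinline, checkinlinesize() splits it and atomically renames the
    # rewritten index over the old one:
    #
    #     inline:  .i = [entry 0][data 0][entry 1][data 1] ...
    #     split:   .i = [entry 0][entry 1] ...
    #              .d = [data 0][data 1] ...
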
    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        """
        node = hash(text, p1, p2)
        if node in self.nodemap:
            return node

        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a+")
        try:
            return self._addrevision(node, text, transaction, link, p1, p2,
                                     cachedelta, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def _addrevision(self, node, text, transaction, link, p1, p2,
                     cachedelta, ifh, dfh):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.
        invariants:
        - text is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        btext = [text]
        def buildtext():
            if btext[0] is not None:
                return btext[0]
            # flush any pending writes here so we can read it in revision
            if dfh:
                dfh.flush()
            ifh.flush()
            basetext = self.revision(self.node(cachedelta[0]))
            btext[0] = mdiff.patch(basetext, cachedelta[1])
            chk = hash(btext[0], p1, p2)
            if chk != node:
                raise RevlogError(_("consistency error in delta"))
            return btext[0]

        def builddelta(rev):
            # can we use the cached delta?
            if cachedelta and cachedelta[0] == rev:
                delta = cachedelta[1]
            else:
                t = buildtext()
                ptext = self.revision(self.node(rev))
                delta = mdiff.textdiff(ptext, t)
            data = compress(delta)
            l = len(data[1]) + len(data[0])
            if basecache[0] == rev:
                chainbase = basecache[1]
            else:
                chainbase = self.chainbase(rev)
            dist = l + offset - self.start(chainbase)
            if self._generaldelta:
                base = rev
            else:
                base = chainbase
            return dist, l, data, base, chainbase

        curr = len(self)
        prev = curr - 1
        base = chainbase = curr
        offset = self.end(prev)
        flags = 0
        d = None
        basecache = self._basecache
        p1r, p2r = self.rev(p1), self.rev(p2)

        # should we try to build a delta?
        if prev != nullrev:
            if self._generaldelta:
                if p1r >= basecache[1]:
                    d = builddelta(p1r)
                elif p2r >= basecache[1]:
                    d = builddelta(p2r)
                else:
                    d = builddelta(prev)
            else:
                d = builddelta(prev)
            dist, l, data, base, chainbase = d

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if text is None:
            textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
                                        cachedelta[1])
        else:
            textlen = len(text)
        if d is None or dist > textlen * 2:
            text = buildtext()
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = chainbase = curr

        e = (offset_type(offset, flags), l, textlen,
             base, link, p1r, p2r, node)
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        if type(text) == str: # only accept immutable objects
            self._cache = (node, curr, text)
        self._basecache = (curr, chainbase)
        return node

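    # Editor's sketch of the snapshot heuristic above (made-up numbers):
    # "dist" approximates the bytes that must be read to rebuild the
    # revision, i.e. everything from the chain base onwards.  A delta is
    # kept only while dist <= 2 * textlen:
    #
    #     textlen = 1000, dist = 1800  ->  append the delta
    #     textlen = 1000, dist = 2500  ->  store a full compressed text
    #
    # This bounds reconstruction cost at roughly twice the revision's
    # own size, trading disk space for read speed.
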
    def group(self, nodelist, bundler):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. If the first parent is nullrev, the
        changegroup starts with a full revision.
        """

        revs = sorted([self.rev(n) for n in nodelist])

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield bundler.close()
            return

        # add the parent of the first rev
        p = self.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        for r in xrange(len(revs) - 1):
            prev, curr = revs[r], revs[r + 1]
            for c in bundler.revchunk(self, curr, prev):
                yield c

        yield bundler.close()

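    # Editor's sketch (hypothetical values): for nodelist covering revs
    # [3, 4, 6], prepending the first parent gives revs == [p, 3, 4, 6]
    # and the emitted chunks are the deltas
    #
    #     p -> 3,   3 -> 4,   4 -> 6
    #
    # so each revision is encoded against its predecessor in the group,
    # not necessarily against its own parent in the revlog.
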
    def addgroup(self, bundle, linkmapper, transaction):
        """
        add a delta group

        given a set of deltas, add them to the revision log. The first
        delta is against its parent, which should be in our log; the
        rest are against the previous delta.
        """

        # track the base of the current delta log
        node = None

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        try:
            # loop through our set of deltas
            chain = None
            while True:
                chunkdata = bundle.deltachunk(chain)
                if not chunkdata:
                    break
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']

                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)
                chain = self._addrevision(node, None, transaction, link,
                                          p1, p2, (baserev, delta), ifh, dfh)
                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return node

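    # Editor's note (assumption about the bundle API as consumed above):
    # each mapping returned by bundle.deltachunk() carries the six keys
    # addgroup() reads, roughly:
    #
    #     {'node': ..., 'p1': ..., 'p2': ..., 'cs': ...,
    #      'deltabase': ..., 'delta': ...}
    #
    # 'cs' is the changeset node handed to linkmapper to derive the
    # linkrev, and 'deltabase' names the revision the delta applies to.
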
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        for rev in self:
            if self.index[rev][4] >= minlink:
                break
        else:
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

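    # Editor's sketch of the truncation arithmetic above (s is the size
    # of one index entry):
    #
    #     split:   .d cut at start(rev);  .i cut at rev * s
    #     inline:  .i cut at start(rev) + rev * s
    #
    # i.e. for an inline revlog the surviving data and the rev index
    # entries that precede it share the .i file, so both terms add up.
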
    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

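    # Editor's note (illustrative): checksize() reports (dd, di), the
    # byte mismatch of the data file and index file respectively.  A
    # healthy revlog returns (0, 0); positive dd means trailing bytes
    # past the last revision, negative dd a truncated data file, and
    # nonzero di an index that does not divide into whole entries (or,
    # for inline revlogs, one that disagrees with the summed chunk
    # lengths).
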
    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res