add options dict to localrepo.store.opener and use it for defversion
Vsevolod Solovyov
r10322:d9a2bc2f default
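
The substance of the change: instead of hanging a one-off defversion attribute directly on the store opener, the opener now carries a general-purpose options dictionary, and the changelog's revlog version travels through it (see the marked lines in the diff below). A minimal sketch of the pattern, in which FakeOpener is a hypothetical stand-in for illustration, not Mercurial's real util.opener:

    # Sketch of the options-dict pattern this commit introduces.
    # FakeOpener is hypothetical; only the 'options' idea comes from the diff.
    class FakeOpener(object):
        def __init__(self):
            # one well-known place for per-opener settings
            self.options = {}

    opener = FakeOpener()

    # before: callers set ad-hoc attributes, e.g. opener.defversion = version
    # after: settings are keys in the shared dict
    opener.options['defversion'] = 1  # e.g. a revlog format version

    # consumers read settings uniformly, with an explicit default
    version = opener.options.get('defversion', 0)

The dict buys extensibility: future per-store settings become new keys rather than further ad-hoc attributes on the opener.
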
@@ -1,2169 +1,2170 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as match_
import merge as merge_
import tags as tags_
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
    supported = set('revlogv1 store fncache shared'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
+        self.sopener.options = {}

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
-        self.sopener.defversion = c.version
+        self.sopener.options['defversion'] = c.version
116 return c
117 return c
117
118
118 @propertycache
119 @propertycache
119 def manifest(self):
120 def manifest(self):
120 return manifest.manifest(self.sopener)
121 return manifest.manifest(self.sopener)
121
122
122 @propertycache
123 @propertycache
123 def dirstate(self):
124 def dirstate(self):
124 return dirstate.dirstate(self.opener, self.ui, self.root)
125 return dirstate.dirstate(self.opener, self.ui, self.root)
125
126
126 def __getitem__(self, changeid):
127 def __getitem__(self, changeid):
127 if changeid is None:
128 if changeid is None:
128 return context.workingctx(self)
129 return context.workingctx(self)
129 return context.changectx(self, changeid)
130 return context.changectx(self, changeid)
130
131
131 def __contains__(self, changeid):
132 def __contains__(self, changeid):
132 try:
133 try:
133 return bool(self.lookup(changeid))
134 return bool(self.lookup(changeid))
134 except error.RepoLookupError:
135 except error.RepoLookupError:
135 return False
136 return False
136
137
137 def __nonzero__(self):
138 def __nonzero__(self):
138 return True
139 return True
139
140
140 def __len__(self):
141 def __len__(self):
141 return len(self.changelog)
142 return len(self.changelog)
142
143
143 def __iter__(self):
144 def __iter__(self):
144 for i in xrange(len(self)):
145 for i in xrange(len(self)):
145 yield i
146 yield i
146
147
147 def url(self):
148 def url(self):
148 return 'file:' + self.root
149 return 'file:' + self.root
149
150
150 def hook(self, name, throw=False, **args):
151 def hook(self, name, throw=False, **args):
151 return hook.hook(self.ui, self, name, throw, **args)
152 return hook.hook(self.ui, self, name, throw, **args)
152
153
153 tag_disallowed = ':\r\n'
154 tag_disallowed = ':\r\n'
154
155
155 def _tag(self, names, node, message, local, user, date, extra={}):
156 def _tag(self, names, node, message, local, user, date, extra={}):
156 if isinstance(names, str):
157 if isinstance(names, str):
157 allchars = names
158 allchars = names
158 names = (names,)
159 names = (names,)
159 else:
160 else:
160 allchars = ''.join(names)
161 allchars = ''.join(names)
161 for c in self.tag_disallowed:
162 for c in self.tag_disallowed:
162 if c in allchars:
163 if c in allchars:
163 raise util.Abort(_('%r cannot be used in a tag name') % c)
164 raise util.Abort(_('%r cannot be used in a tag name') % c)
164
165
165 for name in names:
166 for name in names:
166 self.hook('pretag', throw=True, node=hex(node), tag=name,
167 self.hook('pretag', throw=True, node=hex(node), tag=name,
167 local=local)
168 local=local)
168
169
169 def writetags(fp, names, munge, prevtags):
170 def writetags(fp, names, munge, prevtags):
170 fp.seek(0, 2)
171 fp.seek(0, 2)
171 if prevtags and prevtags[-1] != '\n':
172 if prevtags and prevtags[-1] != '\n':
172 fp.write('\n')
173 fp.write('\n')
173 for name in names:
174 for name in names:
174 m = munge and munge(name) or name
175 m = munge and munge(name) or name
175 if self._tagtypes and name in self._tagtypes:
176 if self._tagtypes and name in self._tagtypes:
176 old = self._tags.get(name, nullid)
177 old = self._tags.get(name, nullid)
177 fp.write('%s %s\n' % (hex(old), m))
178 fp.write('%s %s\n' % (hex(old), m))
178 fp.write('%s %s\n' % (hex(node), m))
179 fp.write('%s %s\n' % (hex(node), m))
179 fp.close()
180 fp.close()
180
181
181 prevtags = ''
182 prevtags = ''
182 if local:
183 if local:
183 try:
184 try:
184 fp = self.opener('localtags', 'r+')
185 fp = self.opener('localtags', 'r+')
185 except IOError:
186 except IOError:
186 fp = self.opener('localtags', 'a')
187 fp = self.opener('localtags', 'a')
187 else:
188 else:
188 prevtags = fp.read()
189 prevtags = fp.read()
189
190
190 # local tags are stored in the current charset
191 # local tags are stored in the current charset
191 writetags(fp, names, None, prevtags)
192 writetags(fp, names, None, prevtags)
192 for name in names:
193 for name in names:
193 self.hook('tag', node=hex(node), tag=name, local=local)
194 self.hook('tag', node=hex(node), tag=name, local=local)
194 return
195 return
195
196
196 try:
197 try:
197 fp = self.wfile('.hgtags', 'rb+')
198 fp = self.wfile('.hgtags', 'rb+')
198 except IOError:
199 except IOError:
199 fp = self.wfile('.hgtags', 'ab')
200 fp = self.wfile('.hgtags', 'ab')
200 else:
201 else:
201 prevtags = fp.read()
202 prevtags = fp.read()
202
203
203 # committed tags are stored in UTF-8
204 # committed tags are stored in UTF-8
204 writetags(fp, names, encoding.fromlocal, prevtags)
205 writetags(fp, names, encoding.fromlocal, prevtags)
205
206
206 if '.hgtags' not in self.dirstate:
207 if '.hgtags' not in self.dirstate:
207 self.add(['.hgtags'])
208 self.add(['.hgtags'])
208
209
209 m = match_.exact(self.root, '', ['.hgtags'])
210 m = match_.exact(self.root, '', ['.hgtags'])
210 tagnode = self.commit(message, user, date, extra=extra, match=m)
211 tagnode = self.commit(message, user, date, extra=extra, match=m)
211
212
212 for name in names:
213 for name in names:
213 self.hook('tag', node=hex(node), tag=name, local=local)
214 self.hook('tag', node=hex(node), tag=name, local=local)
214
215
215 return tagnode
216 return tagnode
216
217
217 def tag(self, names, node, message, local, user, date):
218 def tag(self, names, node, message, local, user, date):
218 '''tag a revision with one or more symbolic names.
219 '''tag a revision with one or more symbolic names.
219
220
220 names is a list of strings or, when adding a single tag, names may be a
221 names is a list of strings or, when adding a single tag, names may be a
221 string.
222 string.
222
223
223 if local is True, the tags are stored in a per-repository file.
224 if local is True, the tags are stored in a per-repository file.
224 otherwise, they are stored in the .hgtags file, and a new
225 otherwise, they are stored in the .hgtags file, and a new
225 changeset is committed with the change.
226 changeset is committed with the change.
226
227
227 keyword arguments:
228 keyword arguments:
228
229
229 local: whether to store tags in non-version-controlled file
230 local: whether to store tags in non-version-controlled file
230 (default False)
231 (default False)
231
232
232 message: commit message to use if committing
233 message: commit message to use if committing
233
234
234 user: name of user to use if committing
235 user: name of user to use if committing
235
236
236 date: date tuple to use if committing'''
237 date: date tuple to use if committing'''
237
238
238 for x in self.status()[:5]:
239 for x in self.status()[:5]:
239 if '.hgtags' in x:
240 if '.hgtags' in x:
240 raise util.Abort(_('working copy of .hgtags is changed '
241 raise util.Abort(_('working copy of .hgtags is changed '
241 '(please commit .hgtags manually)'))
242 '(please commit .hgtags manually)'))
242
243
243 self.tags() # instantiate the cache
244 self.tags() # instantiate the cache
244 self._tag(names, node, message, local, user, date)
245 self._tag(names, node, message, local, user, date)
245
246
246 def tags(self):
247 def tags(self):
247 '''return a mapping of tag to node'''
248 '''return a mapping of tag to node'''
248 if self._tags is None:
249 if self._tags is None:
249 (self._tags, self._tagtypes) = self._findtags()
250 (self._tags, self._tagtypes) = self._findtags()
250
251
251 return self._tags
252 return self._tags
252
253
253 def _findtags(self):
254 def _findtags(self):
254 '''Do the hard work of finding tags. Return a pair of dicts
255 '''Do the hard work of finding tags. Return a pair of dicts
255 (tags, tagtypes) where tags maps tag name to node, and tagtypes
256 (tags, tagtypes) where tags maps tag name to node, and tagtypes
256 maps tag name to a string like \'global\' or \'local\'.
257 maps tag name to a string like \'global\' or \'local\'.
257 Subclasses or extensions are free to add their own tags, but
258 Subclasses or extensions are free to add their own tags, but
258 should be aware that the returned dicts will be retained for the
259 should be aware that the returned dicts will be retained for the
259 duration of the localrepo object.'''
260 duration of the localrepo object.'''
260
261
261 # XXX what tagtype should subclasses/extensions use? Currently
262 # XXX what tagtype should subclasses/extensions use? Currently
262 # mq and bookmarks add tags, but do not set the tagtype at all.
263 # mq and bookmarks add tags, but do not set the tagtype at all.
263 # Should each extension invent its own tag type? Should there
264 # Should each extension invent its own tag type? Should there
264 # be one tagtype for all such "virtual" tags? Or is the status
265 # be one tagtype for all such "virtual" tags? Or is the status
265 # quo fine?
266 # quo fine?
266
267
267 alltags = {} # map tag name to (node, hist)
268 alltags = {} # map tag name to (node, hist)
268 tagtypes = {}
269 tagtypes = {}
269
270
270 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
271 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
271 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
272 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
272
273
273 # Build the return dicts. Have to re-encode tag names because
274 # Build the return dicts. Have to re-encode tag names because
274 # the tags module always uses UTF-8 (in order not to lose info
275 # the tags module always uses UTF-8 (in order not to lose info
275 # writing to the cache), but the rest of Mercurial wants them in
276 # writing to the cache), but the rest of Mercurial wants them in
276 # local encoding.
277 # local encoding.
277 tags = {}
278 tags = {}
278 for (name, (node, hist)) in alltags.iteritems():
279 for (name, (node, hist)) in alltags.iteritems():
279 if node != nullid:
280 if node != nullid:
280 tags[encoding.tolocal(name)] = node
281 tags[encoding.tolocal(name)] = node
281 tags['tip'] = self.changelog.tip()
282 tags['tip'] = self.changelog.tip()
282 tagtypes = dict([(encoding.tolocal(name), value)
283 tagtypes = dict([(encoding.tolocal(name), value)
283 for (name, value) in tagtypes.iteritems()])
284 for (name, value) in tagtypes.iteritems()])
284 return (tags, tagtypes)
285 return (tags, tagtypes)
285
286
286 def tagtype(self, tagname):
287 def tagtype(self, tagname):
287 '''
288 '''
288 return the type of the given tag. result can be:
289 return the type of the given tag. result can be:
289
290
290 'local' : a local tag
291 'local' : a local tag
291 'global' : a global tag
292 'global' : a global tag
292 None : tag does not exist
293 None : tag does not exist
293 '''
294 '''
294
295
295 self.tags()
296 self.tags()
296
297
297 return self._tagtypes.get(tagname)
298 return self._tagtypes.get(tagname)
298
299
299 def tagslist(self):
300 def tagslist(self):
300 '''return a list of tags ordered by revision'''
301 '''return a list of tags ordered by revision'''
301 l = []
302 l = []
302 for t, n in self.tags().iteritems():
303 for t, n in self.tags().iteritems():
303 try:
304 try:
304 r = self.changelog.rev(n)
305 r = self.changelog.rev(n)
305 except:
306 except:
306 r = -2 # sort to the beginning of the list if unknown
307 r = -2 # sort to the beginning of the list if unknown
307 l.append((r, t, n))
308 l.append((r, t, n))
308 return [(t, n) for r, t, n in sorted(l)]
309 return [(t, n) for r, t, n in sorted(l)]
309
310
310 def nodetags(self, node):
311 def nodetags(self, node):
311 '''return the tags associated with a node'''
312 '''return the tags associated with a node'''
312 if not self.nodetagscache:
313 if not self.nodetagscache:
313 self.nodetagscache = {}
314 self.nodetagscache = {}
314 for t, n in self.tags().iteritems():
315 for t, n in self.tags().iteritems():
315 self.nodetagscache.setdefault(n, []).append(t)
316 self.nodetagscache.setdefault(n, []).append(t)
316 return self.nodetagscache.get(node, [])
317 return self.nodetagscache.get(node, [])
317
318
318 def _branchtags(self, partial, lrev):
319 def _branchtags(self, partial, lrev):
319 # TODO: rename this function?
320 # TODO: rename this function?
320 tiprev = len(self) - 1
321 tiprev = len(self) - 1
321 if lrev != tiprev:
322 if lrev != tiprev:
322 self._updatebranchcache(partial, lrev + 1, tiprev + 1)
323 self._updatebranchcache(partial, lrev + 1, tiprev + 1)
323 self._writebranchcache(partial, self.changelog.tip(), tiprev)
324 self._writebranchcache(partial, self.changelog.tip(), tiprev)
324
325
325 return partial
326 return partial
326
327
327 def branchmap(self):
328 def branchmap(self):
328 tip = self.changelog.tip()
329 tip = self.changelog.tip()
329 if self._branchcache is not None and self._branchcachetip == tip:
330 if self._branchcache is not None and self._branchcachetip == tip:
330 return self._branchcache
331 return self._branchcache
331
332
332 oldtip = self._branchcachetip
333 oldtip = self._branchcachetip
333 self._branchcachetip = tip
334 self._branchcachetip = tip
334 if oldtip is None or oldtip not in self.changelog.nodemap:
335 if oldtip is None or oldtip not in self.changelog.nodemap:
335 partial, last, lrev = self._readbranchcache()
336 partial, last, lrev = self._readbranchcache()
336 else:
337 else:
337 lrev = self.changelog.rev(oldtip)
338 lrev = self.changelog.rev(oldtip)
338 partial = self._branchcache
339 partial = self._branchcache
339
340
340 self._branchtags(partial, lrev)
341 self._branchtags(partial, lrev)
341 # this private cache holds all heads (not just tips)
342 # this private cache holds all heads (not just tips)
342 self._branchcache = partial
343 self._branchcache = partial
343
344
344 return self._branchcache
345 return self._branchcache
345
346
346 def branchtags(self):
347 def branchtags(self):
347 '''return a dict where branch names map to the tipmost head of
348 '''return a dict where branch names map to the tipmost head of
348 the branch, open heads come before closed'''
349 the branch, open heads come before closed'''
349 bt = {}
350 bt = {}
350 for bn, heads in self.branchmap().iteritems():
351 for bn, heads in self.branchmap().iteritems():
351 head = None
352 head = None
352 for i in range(len(heads)-1, -1, -1):
353 for i in range(len(heads)-1, -1, -1):
353 h = heads[i]
354 h = heads[i]
354 if 'close' not in self.changelog.read(h)[5]:
355 if 'close' not in self.changelog.read(h)[5]:
355 head = h
356 head = h
356 break
357 break
357 # no open heads were found
358 # no open heads were found
358 if head is None:
359 if head is None:
359 head = heads[-1]
360 head = heads[-1]
360 bt[bn] = head
361 bt[bn] = head
361 return bt
362 return bt
362
363
363
364
364 def _readbranchcache(self):
365 def _readbranchcache(self):
365 partial = {}
366 partial = {}
366 try:
367 try:
367 f = self.opener("branchheads.cache")
368 f = self.opener("branchheads.cache")
368 lines = f.read().split('\n')
369 lines = f.read().split('\n')
369 f.close()
370 f.close()
370 except (IOError, OSError):
371 except (IOError, OSError):
371 return {}, nullid, nullrev
372 return {}, nullid, nullrev
372
373
373 try:
374 try:
374 last, lrev = lines.pop(0).split(" ", 1)
375 last, lrev = lines.pop(0).split(" ", 1)
375 last, lrev = bin(last), int(lrev)
376 last, lrev = bin(last), int(lrev)
376 if lrev >= len(self) or self[lrev].node() != last:
377 if lrev >= len(self) or self[lrev].node() != last:
377 # invalidate the cache
378 # invalidate the cache
378 raise ValueError('invalidating branch cache (tip differs)')
379 raise ValueError('invalidating branch cache (tip differs)')
379 for l in lines:
380 for l in lines:
380 if not l:
381 if not l:
381 continue
382 continue
382 node, label = l.split(" ", 1)
383 node, label = l.split(" ", 1)
383 partial.setdefault(label.strip(), []).append(bin(node))
384 partial.setdefault(label.strip(), []).append(bin(node))
384 except KeyboardInterrupt:
385 except KeyboardInterrupt:
385 raise
386 raise
386 except Exception, inst:
387 except Exception, inst:
387 if self.ui.debugflag:
388 if self.ui.debugflag:
388 self.ui.warn(str(inst), '\n')
389 self.ui.warn(str(inst), '\n')
389 partial, last, lrev = {}, nullid, nullrev
390 partial, last, lrev = {}, nullid, nullrev
390 return partial, last, lrev
391 return partial, last, lrev
391
392
392 def _writebranchcache(self, branches, tip, tiprev):
393 def _writebranchcache(self, branches, tip, tiprev):
393 try:
394 try:
394 f = self.opener("branchheads.cache", "w", atomictemp=True)
395 f = self.opener("branchheads.cache", "w", atomictemp=True)
395 f.write("%s %s\n" % (hex(tip), tiprev))
396 f.write("%s %s\n" % (hex(tip), tiprev))
396 for label, nodes in branches.iteritems():
397 for label, nodes in branches.iteritems():
397 for node in nodes:
398 for node in nodes:
398 f.write("%s %s\n" % (hex(node), label))
399 f.write("%s %s\n" % (hex(node), label))
399 f.rename()
400 f.rename()
400 except (IOError, OSError):
401 except (IOError, OSError):
401 pass
402 pass
402
403
403 def _updatebranchcache(self, partial, start, end):
404 def _updatebranchcache(self, partial, start, end):
404 # collect new branch entries
405 # collect new branch entries
405 newbranches = {}
406 newbranches = {}
406 for r in xrange(start, end):
407 for r in xrange(start, end):
407 c = self[r]
408 c = self[r]
408 newbranches.setdefault(c.branch(), []).append(c.node())
409 newbranches.setdefault(c.branch(), []).append(c.node())
409 # if older branchheads are reachable from new ones, they aren't
410 # if older branchheads are reachable from new ones, they aren't
410 # really branchheads. Note checking parents is insufficient:
411 # really branchheads. Note checking parents is insufficient:
411 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
412 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
412 for branch, newnodes in newbranches.iteritems():
413 for branch, newnodes in newbranches.iteritems():
413 bheads = partial.setdefault(branch, [])
414 bheads = partial.setdefault(branch, [])
414 bheads.extend(newnodes)
415 bheads.extend(newnodes)
415 if len(bheads) < 2:
416 if len(bheads) < 2:
416 continue
417 continue
417 newbheads = []
418 newbheads = []
418 # starting from tip means fewer passes over reachable
419 # starting from tip means fewer passes over reachable
419 while newnodes:
420 while newnodes:
420 latest = newnodes.pop()
421 latest = newnodes.pop()
421 if latest not in bheads:
422 if latest not in bheads:
422 continue
423 continue
423 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
424 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
424 reachable = self.changelog.reachable(latest, minbhrev)
425 reachable = self.changelog.reachable(latest, minbhrev)
425 bheads = [b for b in bheads if b not in reachable]
426 bheads = [b for b in bheads if b not in reachable]
426 newbheads.insert(0, latest)
427 newbheads.insert(0, latest)
427 bheads.extend(newbheads)
428 bheads.extend(newbheads)
428 partial[branch] = bheads
429 partial[branch] = bheads
429
430
430 def lookup(self, key):
431 def lookup(self, key):
431 if isinstance(key, int):
432 if isinstance(key, int):
432 return self.changelog.node(key)
433 return self.changelog.node(key)
433 elif key == '.':
434 elif key == '.':
434 return self.dirstate.parents()[0]
435 return self.dirstate.parents()[0]
435 elif key == 'null':
436 elif key == 'null':
436 return nullid
437 return nullid
437 elif key == 'tip':
438 elif key == 'tip':
438 return self.changelog.tip()
439 return self.changelog.tip()
439 n = self.changelog._match(key)
440 n = self.changelog._match(key)
440 if n:
441 if n:
441 return n
442 return n
442 if key in self.tags():
443 if key in self.tags():
443 return self.tags()[key]
444 return self.tags()[key]
444 if key in self.branchtags():
445 if key in self.branchtags():
445 return self.branchtags()[key]
446 return self.branchtags()[key]
446 n = self.changelog._partialmatch(key)
447 n = self.changelog._partialmatch(key)
447 if n:
448 if n:
448 return n
449 return n
449
450
450 # can't find key, check if it might have come from damaged dirstate
451 # can't find key, check if it might have come from damaged dirstate
451 if key in self.dirstate.parents():
452 if key in self.dirstate.parents():
452 raise error.Abort(_("working directory has unknown parent '%s'!")
453 raise error.Abort(_("working directory has unknown parent '%s'!")
453 % short(key))
454 % short(key))
454 try:
455 try:
455 if len(key) == 20:
456 if len(key) == 20:
456 key = hex(key)
457 key = hex(key)
457 except:
458 except:
458 pass
459 pass
459 raise error.RepoLookupError(_("unknown revision '%s'") % key)
460 raise error.RepoLookupError(_("unknown revision '%s'") % key)
460
461
461 def local(self):
462 def local(self):
462 return True
463 return True
463
464
464 def join(self, f):
465 def join(self, f):
465 return os.path.join(self.path, f)
466 return os.path.join(self.path, f)
466
467
467 def wjoin(self, f):
468 def wjoin(self, f):
468 return os.path.join(self.root, f)
469 return os.path.join(self.root, f)
469
470
470 def rjoin(self, f):
471 def rjoin(self, f):
471 return os.path.join(self.root, util.pconvert(f))
472 return os.path.join(self.root, util.pconvert(f))
472
473
473 def file(self, f):
474 def file(self, f):
474 if f[0] == '/':
475 if f[0] == '/':
475 f = f[1:]
476 f = f[1:]
476 return filelog.filelog(self.sopener, f)
477 return filelog.filelog(self.sopener, f)
477
478
478 def changectx(self, changeid):
479 def changectx(self, changeid):
479 return self[changeid]
480 return self[changeid]
480
481
481 def parents(self, changeid=None):
482 def parents(self, changeid=None):
482 '''get list of changectxs for parents of changeid'''
483 '''get list of changectxs for parents of changeid'''
483 return self[changeid].parents()
484 return self[changeid].parents()
484
485
485 def filectx(self, path, changeid=None, fileid=None):
486 def filectx(self, path, changeid=None, fileid=None):
486 """changeid can be a changeset revision, node, or tag.
487 """changeid can be a changeset revision, node, or tag.
487 fileid can be a file revision or node."""
488 fileid can be a file revision or node."""
488 return context.filectx(self, path, changeid, fileid)
489 return context.filectx(self, path, changeid, fileid)
489
490
490 def getcwd(self):
491 def getcwd(self):
491 return self.dirstate.getcwd()
492 return self.dirstate.getcwd()
492
493
493 def pathto(self, f, cwd=None):
494 def pathto(self, f, cwd=None):
494 return self.dirstate.pathto(f, cwd)
495 return self.dirstate.pathto(f, cwd)
495
496
496 def wfile(self, f, mode='r'):
497 def wfile(self, f, mode='r'):
497 return self.wopener(f, mode)
498 return self.wopener(f, mode)
498
499
499 def _link(self, f):
500 def _link(self, f):
500 return os.path.islink(self.wjoin(f))
501 return os.path.islink(self.wjoin(f))
501
502
502 def _filter(self, filter, filename, data):
503 def _filter(self, filter, filename, data):
503 if filter not in self.filterpats:
504 if filter not in self.filterpats:
504 l = []
505 l = []
505 for pat, cmd in self.ui.configitems(filter):
506 for pat, cmd in self.ui.configitems(filter):
506 if cmd == '!':
507 if cmd == '!':
507 continue
508 continue
508 mf = match_.match(self.root, '', [pat])
509 mf = match_.match(self.root, '', [pat])
509 fn = None
510 fn = None
510 params = cmd
511 params = cmd
511 for name, filterfn in self._datafilters.iteritems():
512 for name, filterfn in self._datafilters.iteritems():
512 if cmd.startswith(name):
513 if cmd.startswith(name):
513 fn = filterfn
514 fn = filterfn
514 params = cmd[len(name):].lstrip()
515 params = cmd[len(name):].lstrip()
515 break
516 break
516 if not fn:
517 if not fn:
517 fn = lambda s, c, **kwargs: util.filter(s, c)
518 fn = lambda s, c, **kwargs: util.filter(s, c)
518 # Wrap old filters not supporting keyword arguments
519 # Wrap old filters not supporting keyword arguments
519 if not inspect.getargspec(fn)[2]:
520 if not inspect.getargspec(fn)[2]:
520 oldfn = fn
521 oldfn = fn
521 fn = lambda s, c, **kwargs: oldfn(s, c)
522 fn = lambda s, c, **kwargs: oldfn(s, c)
522 l.append((mf, fn, params))
523 l.append((mf, fn, params))
523 self.filterpats[filter] = l
524 self.filterpats[filter] = l
524
525
525 for mf, fn, cmd in self.filterpats[filter]:
526 for mf, fn, cmd in self.filterpats[filter]:
526 if mf(filename):
527 if mf(filename):
527 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
528 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
528 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
529 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
529 break
530 break
530
531
531 return data
532 return data
532
533
533 def adddatafilter(self, name, filter):
534 def adddatafilter(self, name, filter):
534 self._datafilters[name] = filter
535 self._datafilters[name] = filter
535
536
536 def wread(self, filename):
537 def wread(self, filename):
537 if self._link(filename):
538 if self._link(filename):
538 data = os.readlink(self.wjoin(filename))
539 data = os.readlink(self.wjoin(filename))
539 else:
540 else:
540 data = self.wopener(filename, 'r').read()
541 data = self.wopener(filename, 'r').read()
541 return self._filter("encode", filename, data)
542 return self._filter("encode", filename, data)
542
543
543 def wwrite(self, filename, data, flags):
544 def wwrite(self, filename, data, flags):
544 data = self._filter("decode", filename, data)
545 data = self._filter("decode", filename, data)
545 try:
546 try:
546 os.unlink(self.wjoin(filename))
547 os.unlink(self.wjoin(filename))
547 except OSError:
548 except OSError:
548 pass
549 pass
549 if 'l' in flags:
550 if 'l' in flags:
550 self.wopener.symlink(data, filename)
551 self.wopener.symlink(data, filename)
551 else:
552 else:
552 self.wopener(filename, 'w').write(data)
553 self.wopener(filename, 'w').write(data)
553 if 'x' in flags:
554 if 'x' in flags:
554 util.set_flags(self.wjoin(filename), False, True)
555 util.set_flags(self.wjoin(filename), False, True)
555
556
556 def wwritedata(self, filename, data):
557 def wwritedata(self, filename, data):
557 return self._filter("decode", filename, data)
558 return self._filter("decode", filename, data)
558
559
559 def transaction(self):
560 def transaction(self):
560 tr = self._transref and self._transref() or None
561 tr = self._transref and self._transref() or None
561 if tr and tr.running():
562 if tr and tr.running():
562 return tr.nest()
563 return tr.nest()
563
564
564 # abort here if the journal already exists
565 # abort here if the journal already exists
565 if os.path.exists(self.sjoin("journal")):
566 if os.path.exists(self.sjoin("journal")):
566 raise error.RepoError(
567 raise error.RepoError(
567 _("abandoned transaction found - run hg recover"))
568 _("abandoned transaction found - run hg recover"))
568
569
569 # save dirstate for rollback
570 # save dirstate for rollback
570 try:
571 try:
571 ds = self.opener("dirstate").read()
572 ds = self.opener("dirstate").read()
572 except IOError:
573 except IOError:
573 ds = ""
574 ds = ""
574 self.opener("journal.dirstate", "w").write(ds)
575 self.opener("journal.dirstate", "w").write(ds)
575 self.opener("journal.branch", "w").write(self.dirstate.branch())
576 self.opener("journal.branch", "w").write(self.dirstate.branch())
576
577
577 renames = [(self.sjoin("journal"), self.sjoin("undo")),
578 renames = [(self.sjoin("journal"), self.sjoin("undo")),
578 (self.join("journal.dirstate"), self.join("undo.dirstate")),
579 (self.join("journal.dirstate"), self.join("undo.dirstate")),
579 (self.join("journal.branch"), self.join("undo.branch"))]
580 (self.join("journal.branch"), self.join("undo.branch"))]
580 tr = transaction.transaction(self.ui.warn, self.sopener,
581 tr = transaction.transaction(self.ui.warn, self.sopener,
581 self.sjoin("journal"),
582 self.sjoin("journal"),
582 aftertrans(renames),
583 aftertrans(renames),
583 self.store.createmode)
584 self.store.createmode)
584 self._transref = weakref.ref(tr)
585 self._transref = weakref.ref(tr)
585 return tr
586 return tr
586
587
587 def recover(self):
588 def recover(self):
588 lock = self.lock()
589 lock = self.lock()
589 try:
590 try:
590 if os.path.exists(self.sjoin("journal")):
591 if os.path.exists(self.sjoin("journal")):
591 self.ui.status(_("rolling back interrupted transaction\n"))
592 self.ui.status(_("rolling back interrupted transaction\n"))
592 transaction.rollback(self.sopener, self.sjoin("journal"),
593 transaction.rollback(self.sopener, self.sjoin("journal"),
593 self.ui.warn)
594 self.ui.warn)
594 self.invalidate()
595 self.invalidate()
595 return True
596 return True
596 else:
597 else:
597 self.ui.warn(_("no interrupted transaction available\n"))
598 self.ui.warn(_("no interrupted transaction available\n"))
598 return False
599 return False
599 finally:
600 finally:
600 lock.release()
601 lock.release()
601
602
602 def rollback(self):
603 def rollback(self):
603 wlock = lock = None
604 wlock = lock = None
604 try:
605 try:
605 wlock = self.wlock()
606 wlock = self.wlock()
606 lock = self.lock()
607 lock = self.lock()
607 if os.path.exists(self.sjoin("undo")):
608 if os.path.exists(self.sjoin("undo")):
608 self.ui.status(_("rolling back last transaction\n"))
609 self.ui.status(_("rolling back last transaction\n"))
609 transaction.rollback(self.sopener, self.sjoin("undo"),
610 transaction.rollback(self.sopener, self.sjoin("undo"),
610 self.ui.warn)
611 self.ui.warn)
611 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
612 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
612 try:
613 try:
613 branch = self.opener("undo.branch").read()
614 branch = self.opener("undo.branch").read()
614 self.dirstate.setbranch(branch)
615 self.dirstate.setbranch(branch)
615 except IOError:
616 except IOError:
616 self.ui.warn(_("Named branch could not be reset, "
617 self.ui.warn(_("Named branch could not be reset, "
617 "current branch still is: %s\n")
618 "current branch still is: %s\n")
618 % encoding.tolocal(self.dirstate.branch()))
619 % encoding.tolocal(self.dirstate.branch()))
619 self.invalidate()
620 self.invalidate()
620 self.dirstate.invalidate()
621 self.dirstate.invalidate()
621 self.destroyed()
622 self.destroyed()
622 else:
623 else:
623 self.ui.warn(_("no rollback information available\n"))
624 self.ui.warn(_("no rollback information available\n"))
624 finally:
625 finally:
625 release(lock, wlock)
626 release(lock, wlock)
626
627
627 def invalidate(self):
628 def invalidate(self):
628 for a in "changelog manifest".split():
629 for a in "changelog manifest".split():
629 if a in self.__dict__:
630 if a in self.__dict__:
630 delattr(self, a)
631 delattr(self, a)
631 self._tags = None
632 self._tags = None
632 self._tagtypes = None
633 self._tagtypes = None
633 self.nodetagscache = None
634 self.nodetagscache = None
634 self._branchcache = None # in UTF-8
635 self._branchcache = None # in UTF-8
635 self._branchcachetip = None
636 self._branchcachetip = None
636
637
637 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
638 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
638 try:
639 try:
639 l = lock.lock(lockname, 0, releasefn, desc=desc)
640 l = lock.lock(lockname, 0, releasefn, desc=desc)
640 except error.LockHeld, inst:
641 except error.LockHeld, inst:
641 if not wait:
642 if not wait:
642 raise
643 raise
643 self.ui.warn(_("waiting for lock on %s held by %r\n") %
644 self.ui.warn(_("waiting for lock on %s held by %r\n") %
644 (desc, inst.locker))
645 (desc, inst.locker))
645 # default to 600 seconds timeout
646 # default to 600 seconds timeout
646 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
647 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
647 releasefn, desc=desc)
648 releasefn, desc=desc)
648 if acquirefn:
649 if acquirefn:
649 acquirefn()
650 acquirefn()
650 return l
651 return l
651
652
652 def lock(self, wait=True):
653 def lock(self, wait=True):
653 '''Lock the repository store (.hg/store) and return a weak reference
654 '''Lock the repository store (.hg/store) and return a weak reference
654 to the lock. Use this before modifying the store (e.g. committing or
655 to the lock. Use this before modifying the store (e.g. committing or
655 stripping). If you are opening a transaction, get a lock as well.)'''
656 stripping). If you are opening a transaction, get a lock as well.)'''
656 l = self._lockref and self._lockref()
657 l = self._lockref and self._lockref()
657 if l is not None and l.held:
658 if l is not None and l.held:
658 l.lock()
659 l.lock()
659 return l
660 return l
660
661
661 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
662 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
662 _('repository %s') % self.origroot)
663 _('repository %s') % self.origroot)
663 self._lockref = weakref.ref(l)
664 self._lockref = weakref.ref(l)
664 return l
665 return l
665
666
666 def wlock(self, wait=True):
667 def wlock(self, wait=True):
667 '''Lock the non-store parts of the repository (everything under
668 '''Lock the non-store parts of the repository (everything under
668 .hg except .hg/store) and return a weak reference to the lock.
669 .hg except .hg/store) and return a weak reference to the lock.
669 Use this before modifying files in .hg.'''
670 Use this before modifying files in .hg.'''
670 l = self._wlockref and self._wlockref()
671 l = self._wlockref and self._wlockref()
671 if l is not None and l.held:
672 if l is not None and l.held:
672 l.lock()
673 l.lock()
673 return l
674 return l
674
675
675 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
676 self.dirstate.invalidate, _('working directory of %s') %
677 self.dirstate.invalidate, _('working directory of %s') %
677 self.origroot)
678 self.origroot)
678 self._wlockref = weakref.ref(l)
679 self._wlockref = weakref.ref(l)
679 return l
680 return l
680
681
681 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
682 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
682 """
683 """
683 commit an individual file as part of a larger transaction
684 commit an individual file as part of a larger transaction
684 """
685 """
685
686
686 fname = fctx.path()
687 fname = fctx.path()
687 text = fctx.data()
688 text = fctx.data()
688 flog = self.file(fname)
689 flog = self.file(fname)
689 fparent1 = manifest1.get(fname, nullid)
690 fparent1 = manifest1.get(fname, nullid)
690 fparent2 = fparent2o = manifest2.get(fname, nullid)
691 fparent2 = fparent2o = manifest2.get(fname, nullid)
691
692
692 meta = {}
693 meta = {}
693 copy = fctx.renamed()
694 copy = fctx.renamed()
694 if copy and copy[0] != fname:
695 if copy and copy[0] != fname:
695 # Mark the new revision of this file as a copy of another
696 # Mark the new revision of this file as a copy of another
696 # file. This copy data will effectively act as a parent
697 # file. This copy data will effectively act as a parent
697 # of this new revision. If this is a merge, the first
698 # of this new revision. If this is a merge, the first
698 # parent will be the nullid (meaning "look up the copy data")
699 # parent will be the nullid (meaning "look up the copy data")
699 # and the second one will be the other parent. For example:
700 # and the second one will be the other parent. For example:
700 #
701 #
701 # 0 --- 1 --- 3 rev1 changes file foo
702 # 0 --- 1 --- 3 rev1 changes file foo
702 # \ / rev2 renames foo to bar and changes it
703 # \ / rev2 renames foo to bar and changes it
703 # \- 2 -/ rev3 should have bar with all changes and
704 # \- 2 -/ rev3 should have bar with all changes and
704 # should record that bar descends from
705 # should record that bar descends from
705 # bar in rev2 and foo in rev1
706 # bar in rev2 and foo in rev1
706 #
707 #
707 # this allows this merge to succeed:
708 # this allows this merge to succeed:
708 #
709 #
709 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
710 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
710 # \ / merging rev3 and rev4 should use bar@rev2
711 # \ / merging rev3 and rev4 should use bar@rev2
711 # \- 2 --- 4 as the merge base
712 # \- 2 --- 4 as the merge base
712 #
713 #
713
714
714 cfname = copy[0]
715 cfname = copy[0]
715 crev = manifest1.get(cfname)
716 crev = manifest1.get(cfname)
716 newfparent = fparent2
717 newfparent = fparent2
717
718
718 if manifest2: # branch merge
719 if manifest2: # branch merge
719 if fparent2 == nullid or crev is None: # copied on remote side
720 if fparent2 == nullid or crev is None: # copied on remote side
720 if cfname in manifest2:
721 if cfname in manifest2:
721 crev = manifest2[cfname]
722 crev = manifest2[cfname]
722 newfparent = fparent1
723 newfparent = fparent1
723
724
724 # find source in nearest ancestor if we've lost track
725 # find source in nearest ancestor if we've lost track
725 if not crev:
726 if not crev:
726 self.ui.debug(" %s: searching for copy revision for %s\n" %
727 self.ui.debug(" %s: searching for copy revision for %s\n" %
727 (fname, cfname))
728 (fname, cfname))
728 for ancestor in self['.'].ancestors():
729 for ancestor in self['.'].ancestors():
729 if cfname in ancestor:
730 if cfname in ancestor:
730 crev = ancestor[cfname].filenode()
731 crev = ancestor[cfname].filenode()
731 break
732 break
732
733
733 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
734 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
734 meta["copy"] = cfname
735 meta["copy"] = cfname
735 meta["copyrev"] = hex(crev)
736 meta["copyrev"] = hex(crev)
736 fparent1, fparent2 = nullid, newfparent
737 fparent1, fparent2 = nullid, newfparent
737 elif fparent2 != nullid:
738 elif fparent2 != nullid:
738 # is one parent an ancestor of the other?
739 # is one parent an ancestor of the other?
739 fparentancestor = flog.ancestor(fparent1, fparent2)
740 fparentancestor = flog.ancestor(fparent1, fparent2)
740 if fparentancestor == fparent1:
741 if fparentancestor == fparent1:
741 fparent1, fparent2 = fparent2, nullid
742 fparent1, fparent2 = fparent2, nullid
742 elif fparentancestor == fparent2:
743 elif fparentancestor == fparent2:
743 fparent2 = nullid
744 fparent2 = nullid
744
745
745 # is the file changed?
746 # is the file changed?
746 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
747 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
747 changelist.append(fname)
748 changelist.append(fname)
748 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
749 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
749
750
750 # are just the flags changed during merge?
751 # are just the flags changed during merge?
751 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
752 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
752 changelist.append(fname)
753 changelist.append(fname)
753
754
754 return fparent1
755 return fparent1
755
756
756 def commit(self, text="", user=None, date=None, match=None, force=False,
757 def commit(self, text="", user=None, date=None, match=None, force=False,
757 editor=False, extra={}):
758 editor=False, extra={}):
758 """Add a new revision to current repository.
759 """Add a new revision to current repository.
759
760
760 Revision information is gathered from the working directory,
761 Revision information is gathered from the working directory,
761 match can be used to filter the committed files. If editor is
762 match can be used to filter the committed files. If editor is
762 supplied, it is called to get a commit message.
763 supplied, it is called to get a commit message.
763 """
764 """
764
765
765 def fail(f, msg):
766 def fail(f, msg):
766 raise util.Abort('%s: %s' % (f, msg))
767 raise util.Abort('%s: %s' % (f, msg))
767
768
768 if not match:
769 if not match:
769 match = match_.always(self.root, '')
770 match = match_.always(self.root, '')
770
771
771 if not force:
772 if not force:
772 vdirs = []
773 vdirs = []
773 match.dir = vdirs.append
774 match.dir = vdirs.append
774 match.bad = fail
775 match.bad = fail
775
776
776 wlock = self.wlock()
777 wlock = self.wlock()
777 try:
778 try:
778 p1, p2 = self.dirstate.parents()
779 p1, p2 = self.dirstate.parents()
779 wctx = self[None]
780 wctx = self[None]
780
781
781 if (not force and p2 != nullid and match and
782 if (not force and p2 != nullid and match and
782 (match.files() or match.anypats())):
783 (match.files() or match.anypats())):
783 raise util.Abort(_('cannot partially commit a merge '
784 raise util.Abort(_('cannot partially commit a merge '
784 '(do not specify files or patterns)'))
785 '(do not specify files or patterns)'))
785
786
786 changes = self.status(match=match, clean=force)
787 changes = self.status(match=match, clean=force)
787 if force:
788 if force:
788 changes[0].extend(changes[6]) # mq may commit unchanged files
789 changes[0].extend(changes[6]) # mq may commit unchanged files
789
790
790 # check subrepos
791 # check subrepos
791 subs = []
792 subs = []
792 for s in wctx.substate:
793 for s in wctx.substate:
793 if match(s) and wctx.sub(s).dirty():
794 if match(s) and wctx.sub(s).dirty():
794 subs.append(s)
795 subs.append(s)
795 if subs and '.hgsubstate' not in changes[0]:
796 if subs and '.hgsubstate' not in changes[0]:
796 changes[0].insert(0, '.hgsubstate')
797 changes[0].insert(0, '.hgsubstate')
797
798
798 # make sure all explicit patterns are matched
799 # make sure all explicit patterns are matched
799 if not force and match.files():
800 if not force and match.files():
800 matched = set(changes[0] + changes[1] + changes[2])
801 matched = set(changes[0] + changes[1] + changes[2])
801
802
802 for f in match.files():
803 for f in match.files():
803 if f == '.' or f in matched or f in wctx.substate:
804 if f == '.' or f in matched or f in wctx.substate:
804 continue
805 continue
805 if f in changes[3]: # missing
806 if f in changes[3]: # missing
806 fail(f, _('file not found!'))
807 fail(f, _('file not found!'))
807 if f in vdirs: # visited directory
808 if f in vdirs: # visited directory
808 d = f + '/'
809 d = f + '/'
809 for mf in matched:
810 for mf in matched:
810 if mf.startswith(d):
811 if mf.startswith(d):
811 break
812 break
812 else:
813 else:
813 fail(f, _("no match under directory!"))
814 fail(f, _("no match under directory!"))
814 elif f not in self.dirstate:
815 elif f not in self.dirstate:
815 fail(f, _("file not tracked!"))
816 fail(f, _("file not tracked!"))
816
817
817 if (not force and not extra.get("close") and p2 == nullid
818 if (not force and not extra.get("close") and p2 == nullid
818 and not (changes[0] or changes[1] or changes[2])
819 and not (changes[0] or changes[1] or changes[2])
819 and self[None].branch() == self['.'].branch()):
820 and self[None].branch() == self['.'].branch()):
820 return None
821 return None
821
822
822 ms = merge_.mergestate(self)
823 ms = merge_.mergestate(self)
823 for f in changes[0]:
824 for f in changes[0]:
824 if f in ms and ms[f] == 'u':
825 if f in ms and ms[f] == 'u':
825 raise util.Abort(_("unresolved merge conflicts "
826 raise util.Abort(_("unresolved merge conflicts "
826 "(see hg resolve)"))
827 "(see hg resolve)"))
827
828
828 cctx = context.workingctx(self, (p1, p2), text, user, date,
829 cctx = context.workingctx(self, (p1, p2), text, user, date,
829 extra, changes)
830 extra, changes)
830 if editor:
831 if editor:
831 cctx._text = editor(self, cctx, subs)
832 cctx._text = editor(self, cctx, subs)
832 edited = (text != cctx._text)
833 edited = (text != cctx._text)
833
834
834 # commit subs
835 # commit subs
835 if subs:
836 if subs:
836 state = wctx.substate.copy()
837 state = wctx.substate.copy()
837 for s in subs:
838 for s in subs:
838 self.ui.status(_('committing subrepository %s\n') % s)
839 self.ui.status(_('committing subrepository %s\n') % s)
839 sr = wctx.sub(s).commit(cctx._text, user, date)
840 sr = wctx.sub(s).commit(cctx._text, user, date)
840 state[s] = (state[s][0], sr)
841 state[s] = (state[s][0], sr)
841 subrepo.writestate(self, state)
842 subrepo.writestate(self, state)
842
843
843 # Save commit message in case this transaction gets rolled back
844 # Save commit message in case this transaction gets rolled back
844 # (e.g. by a pretxncommit hook). Leave the content alone on
845 # (e.g. by a pretxncommit hook). Leave the content alone on
845 # the assumption that the user will use the same editor again.
846 # the assumption that the user will use the same editor again.
846 msgfile = self.opener('last-message.txt', 'wb')
847 msgfile = self.opener('last-message.txt', 'wb')
847 msgfile.write(cctx._text)
848 msgfile.write(cctx._text)
848 msgfile.close()
849 msgfile.close()
849
850
850 try:
851 try:
851 ret = self.commitctx(cctx, True)
852 ret = self.commitctx(cctx, True)
852 except:
853 except:
853 if edited:
854 if edited:
854 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
855 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
855 self.ui.write(
856 self.ui.write(
856 _('note: commit message saved in %s\n') % msgfn)
857 _('note: commit message saved in %s\n') % msgfn)
857 raise
858 raise
858
859
859 # update dirstate and mergestate
860 # update dirstate and mergestate
860 for f in changes[0] + changes[1]:
861 for f in changes[0] + changes[1]:
861 self.dirstate.normal(f)
862 self.dirstate.normal(f)
862 for f in changes[2]:
863 for f in changes[2]:
863 self.dirstate.forget(f)
864 self.dirstate.forget(f)
864 self.dirstate.setparents(ret)
865 self.dirstate.setparents(ret)
865 ms.reset()
866 ms.reset()
866
867
867 return ret
868 return ret
868
869
869 finally:
870 finally:
870 wlock.release()
871 wlock.release()
871
872
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.

        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr
            lock.release()

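    # A hedged usage sketch (not part of the original module): commitctx is
    # normally reached via the higher-level commit machinery, but any object
    # implementing the context interface used above (p1/p2, manifest, user,
    # date, description, extra) can be committed directly.  'repo' and 'ctx'
    # are assumed to exist:
    #
    #   node = repo.commitctx(ctx)                # the new changelog node
    #   repo.ui.status("created %s\n" % hex(node))
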
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        tags_.findglobaltags(self.ui, self, {}, {})

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

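    # Caller-side sketch (assumed names, not from this module): the seven
    # lists come back in a fixed order, each sorted above:
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(unknown=True, clean=True)
    #   for f in modified:
    #       repo.ui.write("M %s\n" % f)
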
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    # append the filename, not the full path, so the
                    # returned list is consistent with the other branches
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

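    # Usage sketch (hypothetical file names): add() returns the entries it
    # refused to schedule, so callers can report what was skipped:
    #
    #   rejected = repo.add(['a.txt', 'missing.txt'])
    #   if rejected:
    #       repo.ui.warn("not added: %s\n" % ", ".join(rejected))
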
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

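    # Ordering sketch (assumed repo object): since heads come back newest
    # first, the tip of the current branch, if any, is simply:
    #
    #   bheads = repo.branchheads()      # dirstate branch, open heads only
    #   btip = bheads and bheads[0] or None
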
    def branches(self, nodes):
        '''For each node, follow first parents until a merge or a root is
        reached, and return a (head, root, first parent, second parent)
        tuple describing each linear segment found.'''
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

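    # A hedged reading of the returned tuples, matching the append() above:
    #
    #   for t, n, p0, p1 in repo.branches(nodes):
    #       # t:  the input node (head of the linear segment)
    #       # n:  the first merge or root reached by following first parents
    #       # p0, p1: n's parents; p1 != nullid only when n is a merge
    #       ...
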
    def between(self, pairs):
        '''For each (top, bottom) pair, return a list of nodes between
        them, sampled at exponentially growing distances from top (used
        by the binary search in findcommonincoming).'''
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

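    # The sampling above is exponential: f doubles each step, so l ends up
    # holding the ancestors 1, 2, 4, 8, ... steps below top.  Toy example,
    # assuming a linear history node0..node10:
    #
    #   repo.between([(node10, node0)])
    #   # -> [[node9, node8, node6, node2]]    (distances 1, 2, 4, 8)
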
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no children that exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no children that exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n"
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug("request %d: %s\n" %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p + 10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug("found new branch changeset %s\n" %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug("narrowed branch search to %s:%s\n"
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug("found new changesets starting at " +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug("%d total queries\n" % reqcnt)

        return base.keys(), list(fetch), heads

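    # Hedged reading of the three return values, per the statement above:
    #
    #   common, fetch, rheads = repo.findcommonincoming(remote)
    #   # common: nodes known to be present on both sides (base.keys())
    #   # fetch:  roots of the missing subsets, where pulling must start
    #   # rheads: remote heads that are unknown locally
    #   if not fetch:
    #       repo.ui.status("nothing to pull\n")
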
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

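    # Note the shape change above: the second return value exists only when
    # heads were supplied.  A defensive caller sketch (assumed names):
    #
    #   if heads:
    #       roots, updated = repo.findoutgoing(remote, heads=heads)
    #   else:
    #       roots = repo.findoutgoing(remote)
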
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "the other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

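    # Minimal pull sketch (assuming 'other' was opened via hg.repository()):
    #
    #   ret = repo.pull(other)                   # 0 means no changes found
    #   ret = repo.pull(other, heads=[somenode], force=True)
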
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        '''Analyze the local and remote repositories and determine which
        changesets need to be pushed to the remote. Return a tuple
        (changegroup, remoteheads). changegroup is a readable file-like
        object whose read() returns successive changegroup chunks ready to
        be sent over the wire. remoteheads is the list of remote heads.
        '''
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

        def checkbranch(lheads, rheads, updatelb):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelb: outgoing local branch bases
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                # add local heads involved in the push
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelb]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                # add heads we don't have or that are not involved in the push
                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                 self[lheads[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        if not checkbranch(lheads, rheads, update):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            nodes = self.changelog.findmissing(common.keys())
            cg = self._changegroup(nodes, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

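        # Shape of the extranodes argument described above (hypothetical
        # values):
        #
        #   extranodes = {
        #       'foo/bar.txt': [(filenode, linknode)],
        #       1:             [(manifestnode, linknode)],  # 1 keys the manifest
        #   }
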
        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                msngset.pop(revlog.node(r), None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode each manifest was
            # referenced by, so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # that the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r - 1 in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

1805 # We have a list of filenodes we think we need for a file, let's remove
1806 # We have a list of filenodes we think we need for a file, let's remove
1806 # all those we know the recipient must have.
1807 # all those we know the recipient must have.
1807 def prune_filenodes(f, filerevlog):
1808 def prune_filenodes(f, filerevlog):
1808 msngset = msng_filenode_set[f]
1809 msngset = msng_filenode_set[f]
1809 hasset = set()
1810 hasset = set()
1810 # If a 'missing' filenode thinks it belongs to a changenode we
1811 # If a 'missing' filenode thinks it belongs to a changenode we
1811 # assume the recipient must have, then the recipient must have
1812 # assume the recipient must have, then the recipient must have
1812 # that filenode.
1813 # that filenode.
1813 for n in msngset:
1814 for n in msngset:
1814 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1815 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1815 if clnode in has_cl_set:
1816 if clnode in has_cl_set:
1816 hasset.add(n)
1817 hasset.add(n)
1817 prune_parents(filerevlog, hasset, msngset)
1818 prune_parents(filerevlog, hasset, msngset)
1818
1819
1819 # A function-generating function that sets up a context for the
1820 # A function-generating function that sets up a context for the
1820 # inner function.
1821 # inner function.
1821 def lookup_filenode_link_func(fname):
1822 def lookup_filenode_link_func(fname):
1822 msngset = msng_filenode_set[fname]
1823 msngset = msng_filenode_set[fname]
1823 # Lookup the changenode the filenode belongs to.
1824 # Lookup the changenode the filenode belongs to.
1824 def lookup_filenode_link(fnode):
1825 def lookup_filenode_link(fnode):
1825 return msngset[fnode]
1826 return msngset[fnode]
1826 return lookup_filenode_link
1827 return lookup_filenode_link
1827
1828
1828 # Add the nodes that were explicitly requested.
1829 # Add the nodes that were explicitly requested.
1829 def add_extra_nodes(name, nodes):
1830 def add_extra_nodes(name, nodes):
1830 if not extranodes or name not in extranodes:
1831 if not extranodes or name not in extranodes:
1831 return
1832 return
1832
1833
1833 for node, linknode in extranodes[name]:
1834 for node, linknode in extranodes[name]:
1834 if node not in nodes:
1835 if node not in nodes:
1835 nodes[node] = linknode
1836 nodes[node] = linknode
1836
1837
1837 # Now that we have all these utility functions to help out and
1838 # Now that we have all these utility functions to help out and
1838 # logically divide up the task, generate the group.
1839 # logically divide up the task, generate the group.
1839 def gengroup():
1840 def gengroup():
1840 # The set of changed files starts empty.
1841 # The set of changed files starts empty.
1841 changedfiles = {}
1842 changedfiles = {}
1842 # Create a changenode group generator that will call our functions
1843 # Create a changenode group generator that will call our functions
1843 # back to lookup the owning changenode and collect information.
1844 # back to lookup the owning changenode and collect information.
1844 group = cl.group(msng_cl_lst, identity,
1845 group = cl.group(msng_cl_lst, identity,
1845 manifest_and_file_collector(changedfiles))
1846 manifest_and_file_collector(changedfiles))
1846 for chnk in group:
1847 for chnk in group:
1847 yield chnk
1848 yield chnk
1848
1849
1849 # The list of manifests has been collected by the generator
1850 # The list of manifests has been collected by the generator
1850 # calling our functions back.
1851 # calling our functions back.
1851 prune_manifests()
1852 prune_manifests()
1852 add_extra_nodes(1, msng_mnfst_set)
1853 add_extra_nodes(1, msng_mnfst_set)
1853 msng_mnfst_lst = msng_mnfst_set.keys()
1854 msng_mnfst_lst = msng_mnfst_set.keys()
1854 # Sort the manifestnodes by revision number.
1855 # Sort the manifestnodes by revision number.
1855 msng_mnfst_lst.sort(key=mnfst.rev)
1856 msng_mnfst_lst.sort(key=mnfst.rev)
1856 # Create a generator for the manifestnodes that calls our lookup
1857 # Create a generator for the manifestnodes that calls our lookup
1857 # and data collection functions back.
1858 # and data collection functions back.
1858 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1859 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1859 filenode_collector(changedfiles))
1860 filenode_collector(changedfiles))
1860 for chnk in group:
1861 for chnk in group:
1861 yield chnk
1862 yield chnk
1862
1863
1863 # These are no longer needed, dereference and toss the memory for
1864 # These are no longer needed, dereference and toss the memory for
1864 # them.
1865 # them.
1865 msng_mnfst_lst = None
1866 msng_mnfst_lst = None
1866 msng_mnfst_set.clear()
1867 msng_mnfst_set.clear()
1867
1868
1868 if extranodes:
1869 if extranodes:
1869 for fname in extranodes:
1870 for fname in extranodes:
1870 if isinstance(fname, int):
1871 if isinstance(fname, int):
1871 continue
1872 continue
1872 msng_filenode_set.setdefault(fname, {})
1873 msng_filenode_set.setdefault(fname, {})
1873 changedfiles[fname] = 1
1874 changedfiles[fname] = 1
1874 # Go through all our files in order sorted by name.
1875 # Go through all our files in order sorted by name.
1875 for fname in sorted(changedfiles):
1876 for fname in sorted(changedfiles):
1876 filerevlog = self.file(fname)
1877 filerevlog = self.file(fname)
1877 if not len(filerevlog):
1878 if not len(filerevlog):
1878 raise util.Abort(_("empty or missing revlog for %s") % fname)
1879 raise util.Abort(_("empty or missing revlog for %s") % fname)
1879 # Toss out the filenodes that the recipient isn't really
1880 # Toss out the filenodes that the recipient isn't really
1880 # missing.
1881 # missing.
1881 if fname in msng_filenode_set:
1882 if fname in msng_filenode_set:
1882 prune_filenodes(fname, filerevlog)
1883 prune_filenodes(fname, filerevlog)
1883 add_extra_nodes(fname, msng_filenode_set[fname])
1884 add_extra_nodes(fname, msng_filenode_set[fname])
1884 msng_filenode_lst = msng_filenode_set[fname].keys()
1885 msng_filenode_lst = msng_filenode_set[fname].keys()
1885 else:
1886 else:
1886 msng_filenode_lst = []
1887 msng_filenode_lst = []
1887 # If any filenodes are left, generate the group for them,
1888 # If any filenodes are left, generate the group for them,
1888 # otherwise don't bother.
1889 # otherwise don't bother.
1889 if len(msng_filenode_lst) > 0:
1890 if len(msng_filenode_lst) > 0:
1890 yield changegroup.chunkheader(len(fname))
1891 yield changegroup.chunkheader(len(fname))
1891 yield fname
1892 yield fname
1892 # Sort the filenodes by their revision #
1893 # Sort the filenodes by their revision #
1893 msng_filenode_lst.sort(key=filerevlog.rev)
1894 msng_filenode_lst.sort(key=filerevlog.rev)
1894 # Create a group generator and only pass in a changenode
1895 # Create a group generator and only pass in a changenode
1895 # lookup function as we need to collect no information
1896 # lookup function as we need to collect no information
1896 # from filenodes.
1897 # from filenodes.
1897 group = filerevlog.group(msng_filenode_lst,
1898 group = filerevlog.group(msng_filenode_lst,
1898 lookup_filenode_link_func(fname))
1899 lookup_filenode_link_func(fname))
1899 for chnk in group:
1900 for chnk in group:
1900 yield chnk
1901 yield chnk
1901 if fname in msng_filenode_set:
1902 if fname in msng_filenode_set:
1902 # Don't need this anymore, toss it to free memory.
1903 # Don't need this anymore, toss it to free memory.
1903 del msng_filenode_set[fname]
1904 del msng_filenode_set[fname]
1904 # Signal that no more groups are left.
1905 # Signal that no more groups are left.
1905 yield changegroup.closechunk()
1906 yield changegroup.closechunk()
1906
1907
1907 if msng_cl_lst:
1908 if msng_cl_lst:
1908 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1909 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1909
1910
1910 return util.chunkbuffer(gengroup())
1911 return util.chunkbuffer(gengroup())
1911
1912
1912 def changegroup(self, basenodes, source):
1913 def changegroup(self, basenodes, source):
1913 # to avoid a race we use changegroupsubset() (issue1320)
1914 # to avoid a race we use changegroupsubset() (issue1320)
1914 return self.changegroupsubset(basenodes, self.heads(), source)
1915 return self.changegroupsubset(basenodes, self.heads(), source)
1915
1916
1916 def _changegroup(self, nodes, source):
1917 def _changegroup(self, nodes, source):
1917 """Compute the changegroup of all nodes that we have that a recipient
1918 """Compute the changegroup of all nodes that we have that a recipient
1918 doesn't. Return a chunkbuffer object whose read() method will return
1919 doesn't. Return a chunkbuffer object whose read() method will return
1919 successive changegroup chunks.
1920 successive changegroup chunks.
1920
1921
1921 This is much easier than the previous function as we can assume that
1922 This is much easier than the previous function as we can assume that
1922 the recipient has any changenode we aren't sending them.
1923 the recipient has any changenode we aren't sending them.
1923
1924
1924 nodes is the set of nodes to send"""
1925 nodes is the set of nodes to send"""
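# Illustrative sketch (hypothetical caller, not part of the original
# source): the returned chunkbuffer is consumed via read(), e.g.
#   cg = repo._changegroup(nodes, 'push')
#   while True:
#       data = cg.read(4096)
#       if not data:
#           break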
1925
1926
1926 self.hook('preoutgoing', throw=True, source=source)
1927 self.hook('preoutgoing', throw=True, source=source)
1927
1928
1928 cl = self.changelog
1929 cl = self.changelog
1929 revset = set([cl.rev(n) for n in nodes])
1930 revset = set([cl.rev(n) for n in nodes])
1930 self.changegroupinfo(nodes, source)
1931 self.changegroupinfo(nodes, source)
1931
1932
1932 def identity(x):
1933 def identity(x):
1933 return x
1934 return x
1934
1935
1935 def gennodelst(log):
1936 def gennodelst(log):
1936 for r in log:
1937 for r in log:
1937 if log.linkrev(r) in revset:
1938 if log.linkrev(r) in revset:
1938 yield log.node(r)
1939 yield log.node(r)
1939
1940
1940 def changed_file_collector(changedfileset):
1941 def changed_file_collector(changedfileset):
1941 def collect_changed_files(clnode):
1942 def collect_changed_files(clnode):
1942 c = cl.read(clnode)
1943 c = cl.read(clnode)
1943 changedfileset.update(c[3])
1944 changedfileset.update(c[3])
1944 return collect_changed_files
1945 return collect_changed_files
1945
1946
1946 def lookuprevlink_func(revlog):
1947 def lookuprevlink_func(revlog):
1947 def lookuprevlink(n):
1948 def lookuprevlink(n):
1948 return cl.node(revlog.linkrev(revlog.rev(n)))
1949 return cl.node(revlog.linkrev(revlog.rev(n)))
1949 return lookuprevlink
1950 return lookuprevlink
1950
1951
1951 def gengroup():
1952 def gengroup():
1952 '''yield a sequence of changegroup chunks (strings)'''
1953 '''yield a sequence of changegroup chunks (strings)'''
1953 # construct a list of all changed files
1954 # construct a list of all changed files
1954 changedfiles = set()
1955 changedfiles = set()
1955
1956
1956 for chnk in cl.group(nodes, identity,
1957 for chnk in cl.group(nodes, identity,
1957 changed_file_collector(changedfiles)):
1958 changed_file_collector(changedfiles)):
1958 yield chnk
1959 yield chnk
1959
1960
1960 mnfst = self.manifest
1961 mnfst = self.manifest
1961 nodeiter = gennodelst(mnfst)
1962 nodeiter = gennodelst(mnfst)
1962 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1963 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1963 yield chnk
1964 yield chnk
1964
1965
1965 for fname in sorted(changedfiles):
1966 for fname in sorted(changedfiles):
1966 filerevlog = self.file(fname)
1967 filerevlog = self.file(fname)
1967 if not len(filerevlog):
1968 if not len(filerevlog):
1968 raise util.Abort(_("empty or missing revlog for %s") % fname)
1969 raise util.Abort(_("empty or missing revlog for %s") % fname)
1969 nodeiter = gennodelst(filerevlog)
1970 nodeiter = gennodelst(filerevlog)
1970 nodeiter = list(nodeiter)
1971 nodeiter = list(nodeiter)
1971 if nodeiter:
1972 if nodeiter:
1972 yield changegroup.chunkheader(len(fname))
1973 yield changegroup.chunkheader(len(fname))
1973 yield fname
1974 yield fname
1974 lookup = lookuprevlink_func(filerevlog)
1975 lookup = lookuprevlink_func(filerevlog)
1975 for chnk in filerevlog.group(nodeiter, lookup):
1976 for chnk in filerevlog.group(nodeiter, lookup):
1976 yield chnk
1977 yield chnk
1977
1978
1978 yield changegroup.closechunk()
1979 yield changegroup.closechunk()
1979
1980
1980 if nodes:
1981 if nodes:
1981 self.hook('outgoing', node=hex(nodes[0]), source=source)
1982 self.hook('outgoing', node=hex(nodes[0]), source=source)
1982
1983
1983 return util.chunkbuffer(gengroup())
1984 return util.chunkbuffer(gengroup())
1984
1985
1985 def addchangegroup(self, source, srctype, url, emptyok=False):
1986 def addchangegroup(self, source, srctype, url, emptyok=False):
1986 """add changegroup to repo.
1987 """add changegroup to repo.
1987
1988
1988 return values:
1989 return values:
1989 - nothing changed or no source: 0
1990 - nothing changed or no source: 0
1990 - more heads than before: 1+added heads (2..n)
1991 - more heads than before: 1+added heads (2..n)
1991 - fewer heads than before: -1-removed heads (-2..-n)
1992 - fewer heads than before: -1-removed heads (-2..-n)
1992 - number of heads stays the same: 1
1993 - number of heads stays the same: 1
1993 """
1994 """
1994 def csmap(x):
1995 def csmap(x):
1995 self.ui.debug("add changeset %s\n" % short(x))
1996 self.ui.debug("add changeset %s\n" % short(x))
1996 return len(cl)
1997 return len(cl)
1997
1998
1998 def revmap(x):
1999 def revmap(x):
1999 return cl.rev(x)
2000 return cl.rev(x)
2000
2001
2001 if not source:
2002 if not source:
2002 return 0
2003 return 0
2003
2004
2004 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2005 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2005
2006
2006 changesets = files = revisions = 0
2007 changesets = files = revisions = 0
2007
2008
2008 # write changelog data to temp files so concurrent readers will not see
2009 # write changelog data to temp files so concurrent readers will not see
2009 # an inconsistent view
2010 # an inconsistent view
2010 cl = self.changelog
2011 cl = self.changelog
2011 cl.delayupdate()
2012 cl.delayupdate()
2012 oldheads = len(cl.heads())
2013 oldheads = len(cl.heads())
2013
2014
2014 tr = self.transaction()
2015 tr = self.transaction()
2015 try:
2016 try:
2016 trp = weakref.proxy(tr)
2017 trp = weakref.proxy(tr)
2017 # pull off the changeset group
2018 # pull off the changeset group
2018 self.ui.status(_("adding changesets\n"))
2019 self.ui.status(_("adding changesets\n"))
2019 clstart = len(cl)
2020 clstart = len(cl)
2020 chunkiter = changegroup.chunkiter(source)
2021 chunkiter = changegroup.chunkiter(source)
2021 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2022 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2022 raise util.Abort(_("received changelog group is empty"))
2023 raise util.Abort(_("received changelog group is empty"))
2023 clend = len(cl)
2024 clend = len(cl)
2024 changesets = clend - clstart
2025 changesets = clend - clstart
2025
2026
2026 # pull off the manifest group
2027 # pull off the manifest group
2027 self.ui.status(_("adding manifests\n"))
2028 self.ui.status(_("adding manifests\n"))
2028 chunkiter = changegroup.chunkiter(source)
2029 chunkiter = changegroup.chunkiter(source)
2029 # no need to check for empty manifest group here:
2030 # no need to check for empty manifest group here:
2030 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2031 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2031 # no new manifest will be created and the manifest group will
2032 # no new manifest will be created and the manifest group will
2032 # be empty during the pull
2033 # be empty during the pull
2033 self.manifest.addgroup(chunkiter, revmap, trp)
2034 self.manifest.addgroup(chunkiter, revmap, trp)
2034
2035
2035 # process the files
2036 # process the files
2036 self.ui.status(_("adding file changes\n"))
2037 self.ui.status(_("adding file changes\n"))
2037 while 1:
2038 while 1:
2038 f = changegroup.getchunk(source)
2039 f = changegroup.getchunk(source)
2039 if not f:
2040 if not f:
2040 break
2041 break
2041 self.ui.debug("adding %s revisions\n" % f)
2042 self.ui.debug("adding %s revisions\n" % f)
2042 fl = self.file(f)
2043 fl = self.file(f)
2043 o = len(fl)
2044 o = len(fl)
2044 chunkiter = changegroup.chunkiter(source)
2045 chunkiter = changegroup.chunkiter(source)
2045 if fl.addgroup(chunkiter, revmap, trp) is None:
2046 if fl.addgroup(chunkiter, revmap, trp) is None:
2046 raise util.Abort(_("received file revlog group is empty"))
2047 raise util.Abort(_("received file revlog group is empty"))
2047 revisions += len(fl) - o
2048 revisions += len(fl) - o
2048 files += 1
2049 files += 1
2049
2050
2050 newheads = len(cl.heads())
2051 newheads = len(cl.heads())
2051 heads = ""
2052 heads = ""
2052 if oldheads and newheads != oldheads:
2053 if oldheads and newheads != oldheads:
2053 heads = _(" (%+d heads)") % (newheads - oldheads)
2054 heads = _(" (%+d heads)") % (newheads - oldheads)
2054
2055
2055 self.ui.status(_("added %d changesets"
2056 self.ui.status(_("added %d changesets"
2056 " with %d changes to %d files%s\n")
2057 " with %d changes to %d files%s\n")
2057 % (changesets, revisions, files, heads))
2058 % (changesets, revisions, files, heads))
2058
2059
2059 if changesets > 0:
2060 if changesets > 0:
2060 p = lambda: cl.writepending() and self.root or ""
2061 p = lambda: cl.writepending() and self.root or ""
2061 self.hook('pretxnchangegroup', throw=True,
2062 self.hook('pretxnchangegroup', throw=True,
2062 node=hex(cl.node(clstart)), source=srctype,
2063 node=hex(cl.node(clstart)), source=srctype,
2063 url=url, pending=p)
2064 url=url, pending=p)
2064
2065
2065 # make changelog see real files again
2066 # make changelog see real files again
2066 cl.finalize(trp)
2067 cl.finalize(trp)
2067
2068
2068 tr.close()
2069 tr.close()
2069 finally:
2070 finally:
2070 del tr
2071 del tr
2071
2072
2072 if changesets > 0:
2073 if changesets > 0:
2073 # forcefully update the on-disk branch cache
2074 # forcefully update the on-disk branch cache
2074 self.ui.debug("updating the branch cache\n")
2075 self.ui.debug("updating the branch cache\n")
2075 self.branchtags()
2076 self.branchtags()
2076 self.hook("changegroup", node=hex(cl.node(clstart)),
2077 self.hook("changegroup", node=hex(cl.node(clstart)),
2077 source=srctype, url=url)
2078 source=srctype, url=url)
2078
2079
2079 for i in xrange(clstart, clend):
2080 for i in xrange(clstart, clend):
2080 self.hook("incoming", node=hex(cl.node(i)),
2081 self.hook("incoming", node=hex(cl.node(i)),
2081 source=srctype, url=url)
2082 source=srctype, url=url)
2082
2083
2083 # never return 0 here:
2084 # never return 0 here:
2084 if newheads < oldheads:
2085 if newheads < oldheads:
2085 return newheads - oldheads - 1
2086 return newheads - oldheads - 1
2086 else:
2087 else:
2087 return newheads - oldheads + 1
2088 return newheads - oldheads + 1
2088
2089
2089
2090
2090 def stream_in(self, remote):
2091 def stream_in(self, remote):
2091 fp = remote.stream_out()
2092 fp = remote.stream_out()
2092 l = fp.readline()
2093 l = fp.readline()
2093 try:
2094 try:
2094 resp = int(l)
2095 resp = int(l)
2095 except ValueError:
2096 except ValueError:
2096 raise error.ResponseError(
2097 raise error.ResponseError(
2097 _('Unexpected response from remote server:'), l)
2098 _('Unexpected response from remote server:'), l)
2098 if resp == 1:
2099 if resp == 1:
2099 raise util.Abort(_('operation forbidden by server'))
2100 raise util.Abort(_('operation forbidden by server'))
2100 elif resp == 2:
2101 elif resp == 2:
2101 raise util.Abort(_('locking the remote repository failed'))
2102 raise util.Abort(_('locking the remote repository failed'))
2102 elif resp != 0:
2103 elif resp != 0:
2103 raise util.Abort(_('the server sent an unknown error code'))
2104 raise util.Abort(_('the server sent an unknown error code'))
2104 self.ui.status(_('streaming all changes\n'))
2105 self.ui.status(_('streaming all changes\n'))
2105 l = fp.readline()
2106 l = fp.readline()
2106 try:
2107 try:
2107 total_files, total_bytes = map(int, l.split(' ', 1))
2108 total_files, total_bytes = map(int, l.split(' ', 1))
2108 except (ValueError, TypeError):
2109 except (ValueError, TypeError):
2109 raise error.ResponseError(
2110 raise error.ResponseError(
2110 _('Unexpected response from remote server:'), l)
2111 _('Unexpected response from remote server:'), l)
2111 self.ui.status(_('%d files to transfer, %s of data\n') %
2112 self.ui.status(_('%d files to transfer, %s of data\n') %
2112 (total_files, util.bytecount(total_bytes)))
2113 (total_files, util.bytecount(total_bytes)))
2113 start = time.time()
2114 start = time.time()
2114 for i in xrange(total_files):
2115 for i in xrange(total_files):
2115 # XXX doesn't support '\n' or '\r' in filenames
2116 # XXX doesn't support '\n' or '\r' in filenames
2116 l = fp.readline()
2117 l = fp.readline()
2117 try:
2118 try:
2118 name, size = l.split('\0', 1)
2119 name, size = l.split('\0', 1)
2119 size = int(size)
2120 size = int(size)
2120 except (ValueError, TypeError):
2121 except (ValueError, TypeError):
2121 raise error.ResponseError(
2122 raise error.ResponseError(
2122 _('Unexpected response from remote server:'), l)
2123 _('Unexpected response from remote server:'), l)
2123 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2124 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2124 # for backwards compat, name was partially encoded
2125 # for backwards compat, name was partially encoded
2125 ofp = self.sopener(store.decodedir(name), 'w')
2126 ofp = self.sopener(store.decodedir(name), 'w')
2126 for chunk in util.filechunkiter(fp, limit=size):
2127 for chunk in util.filechunkiter(fp, limit=size):
2127 ofp.write(chunk)
2128 ofp.write(chunk)
2128 ofp.close()
2129 ofp.close()
2129 elapsed = time.time() - start
2130 elapsed = time.time() - start
2130 if elapsed <= 0:
2131 if elapsed <= 0:
2131 elapsed = 0.001
2132 elapsed = 0.001
2132 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2133 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2133 (util.bytecount(total_bytes), elapsed,
2134 (util.bytecount(total_bytes), elapsed,
2134 util.bytecount(total_bytes / elapsed)))
2135 util.bytecount(total_bytes / elapsed)))
2135 self.invalidate()
2136 self.invalidate()
2136 return len(self.heads()) + 1
2137 return len(self.heads()) + 1
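# Illustrative sketch (assumed wire layout, reconstructed from the parsing
# in stream_in above, with hypothetical values): a status line, a
# "<total_files> <total_bytes>" line, then for each file a "<name>\0<size>"
# line followed by exactly <size> raw bytes:
#   0
#   2 8192
#   data/foo.i\x004096
#   ...4096 bytes of revlog data...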
2137
2138
2138 def clone(self, remote, heads=[], stream=False):
2139 def clone(self, remote, heads=[], stream=False):
2139 '''clone remote repository.
2140 '''clone remote repository.
2140
2141
2141 keyword arguments:
2142 keyword arguments:
2142 heads: list of revs to clone (forces use of pull)
2143 heads: list of revs to clone (forces use of pull)
2143 stream: use streaming clone if possible'''
2144 stream: use streaming clone if possible'''
2144
2145
2145 # now, all clients that can request uncompressed clones can
2146 # now, all clients that can request uncompressed clones can
2146 # read repo formats supported by all servers that can serve
2147 # read repo formats supported by all servers that can serve
2147 # them.
2148 # them.
2148
2149
2149 # if revlog format changes, client will have to check version
2150 # if revlog format changes, client will have to check version
2150 # and format flags on "stream" capability, and use
2151 # and format flags on "stream" capability, and use
2151 # uncompressed only if compatible.
2152 # uncompressed only if compatible.
2152
2153
2153 if stream and not heads and remote.capable('stream'):
2154 if stream and not heads and remote.capable('stream'):
2154 return self.stream_in(remote)
2155 return self.stream_in(remote)
2155 return self.pull(remote, heads)
2156 return self.pull(remote, heads)
2156
2157
2157 # used to avoid circular references so destructors work
2158 # used to avoid circular references so destructors work
2158 def aftertrans(files):
2159 def aftertrans(files):
2159 renamefiles = [tuple(t) for t in files]
2160 renamefiles = [tuple(t) for t in files]
2160 def a():
2161 def a():
2161 for src, dest in renamefiles:
2162 for src, dest in renamefiles:
2162 util.rename(src, dest)
2163 util.rename(src, dest)
2163 return a
2164 return a
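# Illustrative sketch (hypothetical usage, not part of the original
# source): the returned closure captures only plain tuples, never the
# repo object, so it cannot keep a repository alive:
#   undo = aftertrans([('journal', 'undo')])
#   undo()   # renames journal -> undo via util.rename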
2164
2165
2165 def instance(ui, path, create):
2166 def instance(ui, path, create):
2166 return localrepository(ui, util.drop_scheme('file', path), create)
2167 return localrepository(ui, util.drop_scheme('file', path), create)
2167
2168
2168 def islocal(path):
2169 def islocal(path):
2169 return True
2170 return True
@@ -1,1409 +1,1409 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 # import stuff from node for others to import from revlog
14 # import stuff from node for others to import from revlog
15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
16 from i18n import _
16 from i18n import _
17 import changegroup, ancestor, mdiff, parsers, error, util
17 import changegroup, ancestor, mdiff, parsers, error, util
18 import struct, zlib, errno
18 import struct, zlib, errno
19
19
20 _pack = struct.pack
20 _pack = struct.pack
21 _unpack = struct.unpack
21 _unpack = struct.unpack
22 _compress = zlib.compress
22 _compress = zlib.compress
23 _decompress = zlib.decompress
23 _decompress = zlib.decompress
24 _sha = util.sha1
24 _sha = util.sha1
25
25
26 # revlog flags
26 # revlog flags
27 REVLOGV0 = 0
27 REVLOGV0 = 0
28 REVLOGNG = 1
28 REVLOGNG = 1
29 REVLOGNGINLINEDATA = (1 << 16)
29 REVLOGNGINLINEDATA = (1 << 16)
30 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
30 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
31 REVLOG_DEFAULT_FORMAT = REVLOGNG
31 REVLOG_DEFAULT_FORMAT = REVLOGNG
32 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
32 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
33
33
34 _prereadsize = 1048576
34 _prereadsize = 1048576
35
35
36 RevlogError = error.RevlogError
36 RevlogError = error.RevlogError
37 LookupError = error.LookupError
37 LookupError = error.LookupError
38
38
39 def getoffset(q):
39 def getoffset(q):
40 return int(q >> 16)
40 return int(q >> 16)
41
41
42 def gettype(q):
42 def gettype(q):
43 return int(q & 0xFFFF)
43 return int(q & 0xFFFF)
44
44
45 def offset_type(offset, type):
45 def offset_type(offset, type):
46 return long(long(offset) << 16 | type)
46 return long(long(offset) << 16 | type)
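# Illustrative sketch (annotation, not part of the original source): the
# offset and the 16-bit type flag round-trip through the packed value:
#   q = offset_type(12345, 1)          # 12345 << 16 | 1
#   assert getoffset(q) == 12345
#   assert gettype(q) == 1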
47
47
48 nullhash = _sha(nullid)
48 nullhash = _sha(nullid)
49
49
50 def hash(text, p1, p2):
50 def hash(text, p1, p2):
51 """generate a hash from the given text and its parent hashes
51 """generate a hash from the given text and its parent hashes
52
52
53 This hash combines both the current file contents and its history
53 This hash combines both the current file contents and its history
54 in a manner that makes it easy to distinguish nodes with the same
54 in a manner that makes it easy to distinguish nodes with the same
55 content in the revision graph.
55 content in the revision graph.
56 """
56 """
57 # As of now, if one of the parent nodes is null, p2 is null
57 # As of now, if one of the parent nodes is null, p2 is null
58 if p2 == nullid:
58 if p2 == nullid:
59 # deep copy of a hash is faster than creating one
59 # deep copy of a hash is faster than creating one
60 s = nullhash.copy()
60 s = nullhash.copy()
61 s.update(p1)
61 s.update(p1)
62 else:
62 else:
63 # none of the parent nodes are nullid
63 # none of the parent nodes are nullid
64 l = [p1, p2]
64 l = [p1, p2]
65 l.sort()
65 l.sort()
66 s = _sha(l[0])
66 s = _sha(l[0])
67 s.update(l[1])
67 s.update(l[1])
68 s.update(text)
68 s.update(text)
69 return s.digest()
69 return s.digest()
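# Illustrative sketch (hypothetical 20-byte parent ids, not part of the
# original source): identical text under different parents yields
# different nodeids, which is what keeps the revision graph unambiguous:
#   assert hash('text', p1, nullid) != hash('text', p2, nullid)  # p1 != p2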
70
70
71 def compress(text):
71 def compress(text):
72 """ generate a possibly-compressed representation of text """
72 """ generate a possibly-compressed representation of text """
73 if not text:
73 if not text:
74 return ("", text)
74 return ("", text)
75 l = len(text)
75 l = len(text)
76 bin = None
76 bin = None
77 if l < 44:
77 if l < 44:
78 pass
78 pass
79 elif l > 1000000:
79 elif l > 1000000:
80 # zlib makes an internal copy, thus doubling memory usage for
80 # zlib makes an internal copy, thus doubling memory usage for
81 # large files, so let's do this in pieces
81 # large files, so let's do this in pieces
82 z = zlib.compressobj()
82 z = zlib.compressobj()
83 p = []
83 p = []
84 pos = 0
84 pos = 0
85 while pos < l:
85 while pos < l:
86 pos2 = pos + 2**20
86 pos2 = pos + 2**20
87 p.append(z.compress(text[pos:pos2]))
87 p.append(z.compress(text[pos:pos2]))
88 pos = pos2
88 pos = pos2
89 p.append(z.flush())
89 p.append(z.flush())
90 if sum(map(len, p)) < l:
90 if sum(map(len, p)) < l:
91 bin = "".join(p)
91 bin = "".join(p)
92 else:
92 else:
93 bin = _compress(text)
93 bin = _compress(text)
94 if bin is None or len(bin) > l:
94 if bin is None or len(bin) > l:
95 if text[0] == '\0':
95 if text[0] == '\0':
96 return ("", text)
96 return ("", text)
97 return ('u', text)
97 return ('u', text)
98 return ("", bin)
98 return ("", bin)
99
99
100 def decompress(bin):
100 def decompress(bin):
101 """ decompress the given input """
101 """ decompress the given input """
102 if not bin:
102 if not bin:
103 return bin
103 return bin
104 t = bin[0]
104 t = bin[0]
105 if t == '\0':
105 if t == '\0':
106 return bin
106 return bin
107 if t == 'x':
107 if t == 'x':
108 return _decompress(bin)
108 return _decompress(bin)
109 if t == 'u':
109 if t == 'u':
110 return bin[1:]
110 return bin[1:]
111 raise RevlogError(_("unknown compression type %r") % t)
111 raise RevlogError(_("unknown compression type %r") % t)
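# Illustrative sketch (annotation, not part of the original source): the
# header returned by compress() tells decompress() how to treat the data
# ('x' zlib stream, 'u' stored text, '\0' or empty passthrough), so the
# pair always round-trips:
#   h, data = compress(sometext)
#   assert decompress(h + data) == sometext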
112
112
113 class lazyparser(object):
113 class lazyparser(object):
114 """
114 """
115 this class avoids the need to parse the entirety of large indices
115 this class avoids the need to parse the entirety of large indices
116 """
116 """
117
117
118 # lazyparser is not safe to use on Windows if win32 extensions are not
118 # lazyparser is not safe to use on Windows if win32 extensions are not
119 # available. It keeps the file handle open, which makes it impossible
119 # available. It keeps the file handle open, which makes it impossible
120 # to break hardlinks on local cloned repos.
120 # to break hardlinks on local cloned repos.
121
121
122 def __init__(self, dataf):
122 def __init__(self, dataf):
123 try:
123 try:
124 size = util.fstat(dataf).st_size
124 size = util.fstat(dataf).st_size
125 except AttributeError:
125 except AttributeError:
126 size = 0
126 size = 0
127 self.dataf = dataf
127 self.dataf = dataf
128 self.s = struct.calcsize(indexformatng)
128 self.s = struct.calcsize(indexformatng)
129 self.datasize = size
129 self.datasize = size
130 self.l = size / self.s
130 self.l = size / self.s
131 self.index = [None] * self.l
131 self.index = [None] * self.l
132 self.map = {nullid: nullrev}
132 self.map = {nullid: nullrev}
133 self.allmap = 0
133 self.allmap = 0
134 self.all = 0
134 self.all = 0
135 self.mapfind_count = 0
135 self.mapfind_count = 0
136
136
137 def loadmap(self):
137 def loadmap(self):
138 """
138 """
139 during a commit, we need to make sure the rev being added is
139 during a commit, we need to make sure the rev being added is
140 not a duplicate. This requires loading the entire index,
140 not a duplicate. This requires loading the entire index,
141 which is fairly slow. loadmap can load up just the node map,
141 which is fairly slow. loadmap can load up just the node map,
142 which takes much less time.
142 which takes much less time.
143 """
143 """
144 if self.allmap:
144 if self.allmap:
145 return
145 return
146 end = self.datasize
146 end = self.datasize
147 self.allmap = 1
147 self.allmap = 1
148 cur = 0
148 cur = 0
149 count = 0
149 count = 0
150 blocksize = self.s * 256
150 blocksize = self.s * 256
151 self.dataf.seek(0)
151 self.dataf.seek(0)
152 while cur < end:
152 while cur < end:
153 data = self.dataf.read(blocksize)
153 data = self.dataf.read(blocksize)
154 off = 0
154 off = 0
155 for x in xrange(256):
155 for x in xrange(256):
156 n = data[off + ngshaoffset:off + ngshaoffset + 20]
156 n = data[off + ngshaoffset:off + ngshaoffset + 20]
157 self.map[n] = count
157 self.map[n] = count
158 count += 1
158 count += 1
159 if count >= self.l:
159 if count >= self.l:
160 break
160 break
161 off += self.s
161 off += self.s
162 cur += blocksize
162 cur += blocksize
163
163
164 def loadblock(self, blockstart, blocksize, data=None):
164 def loadblock(self, blockstart, blocksize, data=None):
165 if self.all:
165 if self.all:
166 return
166 return
167 if data is None:
167 if data is None:
168 self.dataf.seek(blockstart)
168 self.dataf.seek(blockstart)
169 if blockstart + blocksize > self.datasize:
169 if blockstart + blocksize > self.datasize:
170 # the revlog may have grown since we've started running,
170 # the revlog may have grown since we've started running,
171 # but we don't have space in self.index for more entries.
171 # but we don't have space in self.index for more entries.
172 # limit blocksize so that we don't get too much data.
172 # limit blocksize so that we don't get too much data.
173 blocksize = max(self.datasize - blockstart, 0)
173 blocksize = max(self.datasize - blockstart, 0)
174 data = self.dataf.read(blocksize)
174 data = self.dataf.read(blocksize)
175 lend = len(data) / self.s
175 lend = len(data) / self.s
176 i = blockstart / self.s
176 i = blockstart / self.s
177 off = 0
177 off = 0
178 # lazyindex supports __delitem__
178 # lazyindex supports __delitem__
179 if lend > len(self.index) - i:
179 if lend > len(self.index) - i:
180 lend = len(self.index) - i
180 lend = len(self.index) - i
181 for x in xrange(lend):
181 for x in xrange(lend):
182 if self.index[i + x] is None:
182 if self.index[i + x] is None:
183 b = data[off : off + self.s]
183 b = data[off : off + self.s]
184 self.index[i + x] = b
184 self.index[i + x] = b
185 n = b[ngshaoffset:ngshaoffset + 20]
185 n = b[ngshaoffset:ngshaoffset + 20]
186 self.map[n] = i + x
186 self.map[n] = i + x
187 off += self.s
187 off += self.s
188
188
189 def findnode(self, node):
189 def findnode(self, node):
190 """search backwards through the index file for a specific node"""
190 """search backwards through the index file for a specific node"""
191 if self.allmap:
191 if self.allmap:
192 return None
192 return None
193
193
194 # hg log will cause many many searches for the manifest
194 # hg log will cause many many searches for the manifest
195 # nodes. After we get called a few times, just load the whole
195 # nodes. After we get called a few times, just load the whole
196 # thing.
196 # thing.
197 if self.mapfind_count > 8:
197 if self.mapfind_count > 8:
198 self.loadmap()
198 self.loadmap()
199 if node in self.map:
199 if node in self.map:
200 return node
200 return node
201 return None
201 return None
202 self.mapfind_count += 1
202 self.mapfind_count += 1
203 last = self.l - 1
203 last = self.l - 1
204 while self.index[last] is not None:
204 while self.index[last] is not None:
205 if last == 0:
205 if last == 0:
206 self.all = 1
206 self.all = 1
207 self.allmap = 1
207 self.allmap = 1
208 return None
208 return None
209 last -= 1
209 last -= 1
210 end = (last + 1) * self.s
210 end = (last + 1) * self.s
211 blocksize = self.s * 256
211 blocksize = self.s * 256
212 while end >= 0:
212 while end >= 0:
213 start = max(end - blocksize, 0)
213 start = max(end - blocksize, 0)
214 self.dataf.seek(start)
214 self.dataf.seek(start)
215 data = self.dataf.read(end - start)
215 data = self.dataf.read(end - start)
216 findend = end - start
216 findend = end - start
217 while True:
217 while True:
218 # we're searching backwards, so we have to make sure
218 # we're searching backwards, so we have to make sure
219 # we don't find a changeset where this node is a parent
219 # we don't find a changeset where this node is a parent
220 off = data.find(node, 0, findend)
220 off = data.find(node, 0, findend)
221 findend = off
221 findend = off
222 if off >= 0:
222 if off >= 0:
223 i = off / self.s
223 i = off / self.s
224 off = i * self.s
224 off = i * self.s
225 n = data[off + ngshaoffset:off + ngshaoffset + 20]
225 n = data[off + ngshaoffset:off + ngshaoffset + 20]
226 if n == node:
226 if n == node:
227 self.map[n] = i + start / self.s
227 self.map[n] = i + start / self.s
228 return node
228 return node
229 else:
229 else:
230 break
230 break
231 end -= blocksize
231 end -= blocksize
232 return None
232 return None
233
233
234 def loadindex(self, i=None, end=None):
234 def loadindex(self, i=None, end=None):
235 if self.all:
235 if self.all:
236 return
236 return
237 all = False
237 all = False
238 if i is None:
238 if i is None:
239 blockstart = 0
239 blockstart = 0
240 blocksize = (65536 / self.s) * self.s
240 blocksize = (65536 / self.s) * self.s
241 end = self.datasize
241 end = self.datasize
242 all = True
242 all = True
243 else:
243 else:
244 if end:
244 if end:
245 blockstart = i * self.s
245 blockstart = i * self.s
246 end = end * self.s
246 end = end * self.s
247 blocksize = end - blockstart
247 blocksize = end - blockstart
248 else:
248 else:
249 blockstart = (i & ~1023) * self.s
249 blockstart = (i & ~1023) * self.s
250 blocksize = self.s * 1024
250 blocksize = self.s * 1024
251 end = blockstart + blocksize
251 end = blockstart + blocksize
252 while blockstart < end:
252 while blockstart < end:
253 self.loadblock(blockstart, blocksize)
253 self.loadblock(blockstart, blocksize)
254 blockstart += blocksize
254 blockstart += blocksize
255 if all:
255 if all:
256 self.all = True
256 self.all = True
257
257
258 class lazyindex(object):
258 class lazyindex(object):
259 """a lazy version of the index array"""
259 """a lazy version of the index array"""
260 def __init__(self, parser):
260 def __init__(self, parser):
261 self.p = parser
261 self.p = parser
262 def __len__(self):
262 def __len__(self):
263 return len(self.p.index)
263 return len(self.p.index)
264 def load(self, pos):
264 def load(self, pos):
265 if pos < 0:
265 if pos < 0:
266 pos += len(self.p.index)
266 pos += len(self.p.index)
267 self.p.loadindex(pos)
267 self.p.loadindex(pos)
268 return self.p.index[pos]
268 return self.p.index[pos]
269 def __getitem__(self, pos):
269 def __getitem__(self, pos):
270 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
270 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
271 def __setitem__(self, pos, item):
271 def __setitem__(self, pos, item):
272 self.p.index[pos] = _pack(indexformatng, *item)
272 self.p.index[pos] = _pack(indexformatng, *item)
273 def __delitem__(self, pos):
273 def __delitem__(self, pos):
274 del self.p.index[pos]
274 del self.p.index[pos]
275 def insert(self, pos, e):
275 def insert(self, pos, e):
276 self.p.index.insert(pos, _pack(indexformatng, *e))
276 self.p.index.insert(pos, _pack(indexformatng, *e))
277 def append(self, e):
277 def append(self, e):
278 self.p.index.append(_pack(indexformatng, *e))
278 self.p.index.append(_pack(indexformatng, *e))
279
279
280 class lazymap(object):
280 class lazymap(object):
281 """a lazy version of the node map"""
281 """a lazy version of the node map"""
282 def __init__(self, parser):
282 def __init__(self, parser):
283 self.p = parser
283 self.p = parser
284 def load(self, key):
284 def load(self, key):
285 n = self.p.findnode(key)
285 n = self.p.findnode(key)
286 if n is None:
286 if n is None:
287 raise KeyError(key)
287 raise KeyError(key)
288 def __contains__(self, key):
288 def __contains__(self, key):
289 if key in self.p.map:
289 if key in self.p.map:
290 return True
290 return True
291 self.p.loadmap()
291 self.p.loadmap()
292 return key in self.p.map
292 return key in self.p.map
293 def __iter__(self):
293 def __iter__(self):
294 yield nullid
294 yield nullid
295 for i in xrange(self.p.l):
295 for i in xrange(self.p.l):
296 ret = self.p.index[i]
296 ret = self.p.index[i]
297 if not ret:
297 if not ret:
298 self.p.loadindex(i)
298 self.p.loadindex(i)
299 ret = self.p.index[i]
299 ret = self.p.index[i]
300 if isinstance(ret, str):
300 if isinstance(ret, str):
301 ret = _unpack(indexformatng, ret)
301 ret = _unpack(indexformatng, ret)
302 yield ret[7]
302 yield ret[7]
303 def __getitem__(self, key):
303 def __getitem__(self, key):
304 try:
304 try:
305 return self.p.map[key]
305 return self.p.map[key]
306 except KeyError:
306 except KeyError:
307 try:
307 try:
308 self.load(key)
308 self.load(key)
309 return self.p.map[key]
309 return self.p.map[key]
310 except KeyError:
310 except KeyError:
311 raise KeyError("node " + hex(key))
311 raise KeyError("node " + hex(key))
312 def __setitem__(self, key, val):
312 def __setitem__(self, key, val):
313 self.p.map[key] = val
313 self.p.map[key] = val
314 def __delitem__(self, key):
314 def __delitem__(self, key):
315 del self.p.map[key]
315 del self.p.map[key]
316
316
317 indexformatv0 = ">4l20s20s20s"
317 indexformatv0 = ">4l20s20s20s"
318 v0shaoffset = 56
318 v0shaoffset = 56
319
319
320 class revlogoldio(object):
320 class revlogoldio(object):
321 def __init__(self):
321 def __init__(self):
322 self.size = struct.calcsize(indexformatv0)
322 self.size = struct.calcsize(indexformatv0)
323
323
324 def parseindex(self, fp, data, inline):
324 def parseindex(self, fp, data, inline):
325 s = self.size
325 s = self.size
326 index = []
326 index = []
327 nodemap = {nullid: nullrev}
327 nodemap = {nullid: nullrev}
328 n = off = 0
328 n = off = 0
329 if len(data) == _prereadsize:
329 if len(data) == _prereadsize:
330 data += fp.read() # read the rest
330 data += fp.read() # read the rest
331 l = len(data)
331 l = len(data)
332 while off + s <= l:
332 while off + s <= l:
333 cur = data[off:off + s]
333 cur = data[off:off + s]
334 off += s
334 off += s
335 e = _unpack(indexformatv0, cur)
335 e = _unpack(indexformatv0, cur)
336 # transform to revlogv1 format
336 # transform to revlogv1 format
337 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
337 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
338 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
338 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
339 index.append(e2)
339 index.append(e2)
340 nodemap[e[6]] = n
340 nodemap[e[6]] = n
341 n += 1
341 n += 1
342
342
343 return index, nodemap, None
343 return index, nodemap, None
344
344
345 def packentry(self, entry, node, version, rev):
345 def packentry(self, entry, node, version, rev):
346 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
346 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
347 node(entry[5]), node(entry[6]), entry[7])
347 node(entry[5]), node(entry[6]), entry[7])
348 return _pack(indexformatv0, *e2)
348 return _pack(indexformatv0, *e2)
349
349
350 # index ng:
350 # index ng:
351 # 6 bytes offset
351 # 6 bytes offset
352 # 2 bytes flags
352 # 2 bytes flags
353 # 4 bytes compressed length
353 # 4 bytes compressed length
354 # 4 bytes uncompressed length
354 # 4 bytes uncompressed length
355 # 4 bytes: base rev
355 # 4 bytes: base rev
356 # 4 bytes link rev
356 # 4 bytes link rev
357 # 4 bytes parent 1 rev
357 # 4 bytes parent 1 rev
358 # 4 bytes parent 2 rev
358 # 4 bytes parent 2 rev
359 # 32 bytes: nodeid
359 # 32 bytes: nodeid
360 indexformatng = ">Qiiiiii20s12x"
360 indexformatng = ">Qiiiiii20s12x"
361 ngshaoffset = 32
361 ngshaoffset = 32
362 versionformat = ">I"
362 versionformat = ">I"
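# Illustrative sketch (annotation, not part of the original source): the
# byte counts listed above add up to a fixed 64-byte record:
#   assert struct.calcsize(indexformatng) == 64   # 8 + 6*4 + 20 + 12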
363
363
364 class revlogio(object):
364 class revlogio(object):
365 def __init__(self):
365 def __init__(self):
366 self.size = struct.calcsize(indexformatng)
366 self.size = struct.calcsize(indexformatng)
367
367
368 def parseindex(self, fp, data, inline):
368 def parseindex(self, fp, data, inline):
369 if len(data) == _prereadsize:
369 if len(data) == _prereadsize:
370 if util.openhardlinks() and not inline:
370 if util.openhardlinks() and not inline:
371 # big index, let's parse it on demand
371 # big index, let's parse it on demand
372 parser = lazyparser(fp)
372 parser = lazyparser(fp)
373 index = lazyindex(parser)
373 index = lazyindex(parser)
374 nodemap = lazymap(parser)
374 nodemap = lazymap(parser)
375 e = list(index[0])
375 e = list(index[0])
376 type = gettype(e[0])
376 type = gettype(e[0])
377 e[0] = offset_type(0, type)
377 e[0] = offset_type(0, type)
378 index[0] = e
378 index[0] = e
379 return index, nodemap, None
379 return index, nodemap, None
380 else:
380 else:
381 data += fp.read()
381 data += fp.read()
382
382
383 # call the C implementation to parse the index data
383 # call the C implementation to parse the index data
384 index, nodemap, cache = parsers.parse_index(data, inline)
384 index, nodemap, cache = parsers.parse_index(data, inline)
385 return index, nodemap, cache
385 return index, nodemap, cache
386
386
387 def packentry(self, entry, node, version, rev):
387 def packentry(self, entry, node, version, rev):
388 p = _pack(indexformatng, *entry)
388 p = _pack(indexformatng, *entry)
389 if rev == 0:
389 if rev == 0:
390 p = _pack(versionformat, version) + p[4:]
390 p = _pack(versionformat, version) + p[4:]
391 return p
391 return p
392
392
393 class revlog(object):
393 class revlog(object):
394 """
394 """
395 the underlying revision storage object
395 the underlying revision storage object
396
396
397 A revlog consists of two parts, an index and the revision data.
397 A revlog consists of two parts, an index and the revision data.
398
398
399 The index is a file with a fixed record size containing
399 The index is a file with a fixed record size containing
400 information on each revision, including its nodeid (hash), the
400 information on each revision, including its nodeid (hash), the
401 nodeids of its parents, the position and offset of its data within
401 nodeids of its parents, the position and offset of its data within
402 the data file, and the revision it's based on. Finally, each entry
402 the data file, and the revision it's based on. Finally, each entry
403 contains a linkrev entry that can serve as a pointer to external
403 contains a linkrev entry that can serve as a pointer to external
404 data.
404 data.
405
405
406 The revision data itself is a linear collection of data chunks.
406 The revision data itself is a linear collection of data chunks.
407 Each chunk represents a revision and is usually represented as a
407 Each chunk represents a revision and is usually represented as a
408 delta against the previous chunk. To bound lookup time, runs of
408 delta against the previous chunk. To bound lookup time, runs of
409 deltas are limited to about 2 times the length of the original
409 deltas are limited to about 2 times the length of the original
410 version data. This makes retrieval of a version proportional to
410 version data. This makes retrieval of a version proportional to
411 its size, or O(1) relative to the number of revisions.
411 its size, or O(1) relative to the number of revisions.
412
412
413 Both pieces of the revlog are written to in an append-only
413 Both pieces of the revlog are written to in an append-only
414 fashion, which means we never need to rewrite a file to insert or
414 fashion, which means we never need to rewrite a file to insert or
415 remove data, and can use some simple techniques to avoid the need
415 remove data, and can use some simple techniques to avoid the need
416 for locking while reading.
416 for locking while reading.
417 """
417 """
418 def __init__(self, opener, indexfile):
418 def __init__(self, opener, indexfile):
419 """
419 """
420 create a revlog object
420 create a revlog object
421
421
422 opener is a function that abstracts the file opening operation
422 opener is a function that abstracts the file opening operation
423 and can be used to implement COW semantics or the like.
423 and can be used to implement COW semantics or the like.
424 """
424 """
425 self.indexfile = indexfile
425 self.indexfile = indexfile
426 self.datafile = indexfile[:-2] + ".d"
426 self.datafile = indexfile[:-2] + ".d"
427 self.opener = opener
427 self.opener = opener
428 self._cache = None
428 self._cache = None
429 self._chunkcache = (0, '')
429 self._chunkcache = (0, '')
430 self.nodemap = {nullid: nullrev}
430 self.nodemap = {nullid: nullrev}
431 self.index = []
431 self.index = []
432
432
433 v = REVLOG_DEFAULT_VERSION
433 v = REVLOG_DEFAULT_VERSION
434 if hasattr(opener, "defversion"):
434 if hasattr(opener, 'options') and 'defversion' in opener.options:
435 v = opener.defversion
435 v = opener.options['defversion']
436 if v & REVLOGNG:
436 if v & REVLOGNG:
437 v |= REVLOGNGINLINEDATA
437 v |= REVLOGNGINLINEDATA
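# Illustrative sketch (assumption inferred from this changeset, not part
# of the module): the store opener is now expected to carry an options
# dict rather than a bare defversion attribute, e.g.
#   opener.options = {'defversion': REVLOG_DEFAULT_VERSION}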
438
438
439 i = ''
439 i = ''
440 try:
440 try:
441 f = self.opener(self.indexfile)
441 f = self.opener(self.indexfile)
442 i = f.read(_prereadsize)
442 i = f.read(_prereadsize)
443 if len(i) > 0:
443 if len(i) > 0:
444 v = struct.unpack(versionformat, i[:4])[0]
444 v = struct.unpack(versionformat, i[:4])[0]
445 except IOError, inst:
445 except IOError, inst:
446 if inst.errno != errno.ENOENT:
446 if inst.errno != errno.ENOENT:
447 raise
447 raise
448
448
449 self.version = v
449 self.version = v
450 self._inline = v & REVLOGNGINLINEDATA
450 self._inline = v & REVLOGNGINLINEDATA
451 flags = v & ~0xFFFF
451 flags = v & ~0xFFFF
452 fmt = v & 0xFFFF
452 fmt = v & 0xFFFF
453 if fmt == REVLOGV0 and flags:
453 if fmt == REVLOGV0 and flags:
454 raise RevlogError(_("index %s unknown flags %#04x for format v0")
454 raise RevlogError(_("index %s unknown flags %#04x for format v0")
455 % (self.indexfile, flags >> 16))
455 % (self.indexfile, flags >> 16))
456 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
456 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
457 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
457 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
458 % (self.indexfile, flags >> 16))
458 % (self.indexfile, flags >> 16))
459 elif fmt > REVLOGNG:
459 elif fmt > REVLOGNG:
460 raise RevlogError(_("index %s unknown format %d")
460 raise RevlogError(_("index %s unknown format %d")
461 % (self.indexfile, fmt))
461 % (self.indexfile, fmt))
462
462
463 self._io = revlogio()
463 self._io = revlogio()
464 if self.version == REVLOGV0:
464 if self.version == REVLOGV0:
465 self._io = revlogoldio()
465 self._io = revlogoldio()
466 if i:
466 if i:
467 try:
467 try:
468 d = self._io.parseindex(f, i, self._inline)
468 d = self._io.parseindex(f, i, self._inline)
469 except (ValueError, IndexError):
469 except (ValueError, IndexError):
470 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
470 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
471 self.index, self.nodemap, self._chunkcache = d
471 self.index, self.nodemap, self._chunkcache = d
472 if not self._chunkcache:
472 if not self._chunkcache:
473 self._chunkclear()
473 self._chunkclear()
474
474
475 # add the magic null revision at -1 (if it hasn't been done already)
475 # add the magic null revision at -1 (if it hasn't been done already)
476 if (self.index == [] or isinstance(self.index, lazyindex) or
476 if (self.index == [] or isinstance(self.index, lazyindex) or
477 self.index[-1][7] != nullid):
477 self.index[-1][7] != nullid):
478 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
478 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
479
479
480 def _loadindex(self, start, end):
480 def _loadindex(self, start, end):
481 """load a block of indexes all at once from the lazy parser"""
481 """load a block of indexes all at once from the lazy parser"""
482 if isinstance(self.index, lazyindex):
482 if isinstance(self.index, lazyindex):
483 self.index.p.loadindex(start, end)
483 self.index.p.loadindex(start, end)
484
484
485 def _loadindexmap(self):
485 def _loadindexmap(self):
486 """loads both the map and the index from the lazy parser"""
486 """loads both the map and the index from the lazy parser"""
487 if isinstance(self.index, lazyindex):
487 if isinstance(self.index, lazyindex):
488 p = self.index.p
488 p = self.index.p
489 p.loadindex()
489 p.loadindex()
490 self.nodemap = p.map
490 self.nodemap = p.map
491
491
492 def _loadmap(self):
492 def _loadmap(self):
493 """loads the map from the lazy parser"""
493 """loads the map from the lazy parser"""
494 if isinstance(self.nodemap, lazymap):
494 if isinstance(self.nodemap, lazymap):
495 self.nodemap.p.loadmap()
495 self.nodemap.p.loadmap()
496 self.nodemap = self.nodemap.p.map
496 self.nodemap = self.nodemap.p.map
497
497
498 def tip(self):
498 def tip(self):
499 return self.node(len(self.index) - 2)
499 return self.node(len(self.index) - 2)
500 def __len__(self):
500 def __len__(self):
501 return len(self.index) - 1
501 return len(self.index) - 1
502 def __iter__(self):
502 def __iter__(self):
503 for i in xrange(len(self)):
503 for i in xrange(len(self)):
504 yield i
504 yield i
505 def rev(self, node):
505 def rev(self, node):
506 try:
506 try:
507 return self.nodemap[node]
507 return self.nodemap[node]
508 except KeyError:
508 except KeyError:
509 raise LookupError(node, self.indexfile, _('no node'))
509 raise LookupError(node, self.indexfile, _('no node'))
510 def node(self, rev):
510 def node(self, rev):
511 return self.index[rev][7]
511 return self.index[rev][7]
512 def linkrev(self, rev):
512 def linkrev(self, rev):
513 return self.index[rev][4]
513 return self.index[rev][4]
514 def parents(self, node):
514 def parents(self, node):
515 i = self.index
515 i = self.index
516 d = i[self.rev(node)]
516 d = i[self.rev(node)]
517 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
517 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
518 def parentrevs(self, rev):
518 def parentrevs(self, rev):
519 return self.index[rev][5:7]
519 return self.index[rev][5:7]
520 def start(self, rev):
520 def start(self, rev):
521 return int(self.index[rev][0] >> 16)
521 return int(self.index[rev][0] >> 16)
522 def end(self, rev):
522 def end(self, rev):
523 return self.start(rev) + self.length(rev)
523 return self.start(rev) + self.length(rev)
524 def length(self, rev):
524 def length(self, rev):
525 return self.index[rev][1]
525 return self.index[rev][1]
526 def base(self, rev):
526 def base(self, rev):
527 return self.index[rev][3]
527 return self.index[rev][3]
528
528
529 def size(self, rev):
529 def size(self, rev):
530 """return the length of the uncompressed text for a given revision"""
530 """return the length of the uncompressed text for a given revision"""
531 l = self.index[rev][2]
531 l = self.index[rev][2]
532 if l >= 0:
532 if l >= 0:
533 return l
533 return l
534
534
535 t = self.revision(self.node(rev))
535 t = self.revision(self.node(rev))
536 return len(t)
536 return len(t)
537
537
538 # Alternate implementation. The advantage to this code is it
538 # Alternate implementation. The advantage to this code is it
539 # will be faster for a single revision. However, the results
539 # will be faster for a single revision. However, the results
540 # are not cached, so finding the size of every revision will
540 # are not cached, so finding the size of every revision will
541 # be slower.
541 # be slower.
542 #
542 #
543 # if self.cache and self.cache[1] == rev:
543 # if self.cache and self.cache[1] == rev:
544 # return len(self.cache[2])
544 # return len(self.cache[2])
545 #
545 #
546 # base = self.base(rev)
546 # base = self.base(rev)
547 # if self.cache and self.cache[1] >= base and self.cache[1] < rev:
547 # if self.cache and self.cache[1] >= base and self.cache[1] < rev:
548 # base = self.cache[1]
548 # base = self.cache[1]
549 # text = self.cache[2]
549 # text = self.cache[2]
550 # else:
550 # else:
551 # text = self.revision(self.node(base))
551 # text = self.revision(self.node(base))
552 #
552 #
553 # l = len(text)
553 # l = len(text)
554 # for x in xrange(base + 1, rev + 1):
554 # for x in xrange(base + 1, rev + 1):
555 # l = mdiff.patchedsize(l, self._chunk(x))
555 # l = mdiff.patchedsize(l, self._chunk(x))
556 # return l
556 # return l
557
557
    def reachable(self, node, stop=None):
        """return the set of all nodes ancestral to a given node, including
        the node itself, stopping when stop is matched"""
        reachable = set((node,))
        visit = [node]
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable.add(p)
                    visit.append(p)
        return reachable

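    # Example (illustrative sketch, not part of the original file):
    # reachable() gives a simple ancestry test. Assuming binary node ids
    # 'tip' and 'other' on a revlog 'rl':
    #
    #   ancs = rl.reachable(tip)  # 'tip' plus all of its ancestors
    #   if other in ancs:
    #       pass  # 'other' is 'tip' itself or one of its ancestors
    #
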
    def ancestors(self, *revs):
        """Generate the ancestors of 'revs' in reverse topological order.

        Yield a sequence of revision numbers starting with the parents
        of each revision in revs, i.e., each revision is *not* considered
        an ancestor of itself. Results are in breadth-first order:
        parents of each rev in revs, then parents of those, etc. Result
        does not include the null revision."""
        visit = list(revs)
        seen = set([nullrev])
        while visit:
            for parent in self.parentrevs(visit.pop(0)):
                if parent not in seen:
                    visit.append(parent)
                    seen.add(parent)
                    yield parent

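    # Example (illustrative sketch, not part of the original file): since
    # ancestors() is a generator, a caller can stop the walk as soon as it
    # finds what it needs. 'target' here is a hypothetical rev of interest:
    #
    #   for a in rl.ancestors(r1, r2):
    #       if a == target:
    #           break  # no need to visit the rest of the graph
    #
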
    def descendants(self, *revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        seen = set(revs)
        for i in xrange(min(revs) + 1, len(self)):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

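    # Example (illustrative sketch, not part of the original file):
    # descendants() leans on the revlog invariant that a child always has a
    # higher revision number than its parents, so one forward scan suffices:
    #
    #   kids = list(rl.descendants(r))  # every descendant of rev r, sorted
    #
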
    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        has = set(self.ancestors(*common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = [r for r in heads if r not in has]
        while visit:
            r = visit.pop(0)
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return [self.node(r) for r in missing]

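    # Example (illustrative sketch, not part of the original file; the
    # surrounding names are hypothetical): findmissing() is what discovery
    # during push/pull builds on. Given 'remoteheads', node ids the other
    # side is known to have:
    #
    #   tosend = rl.findmissing(common=remoteheads)
    #   # nodes the remote lacks, parents always before children
    #
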
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, 0)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendents = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == nullrev: # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents. (We seeded the descendents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents.add(n)
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

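    # Example (illustrative sketch, not part of the original file): assuming
    # binary node ids 'base' and 'tip':
    #
    #   nodes, outroots, outheads = rl.nodesbetween([base], [tip])
    #   # 'nodes' is the topologically sorted slice of history that is both
    #   # descended from 'base' and ancestral to 'tip'
    #
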
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            count = len(self)
            if not count:
                return [nullid]
            ishead = [1] * (count + 1)
            index = self.index
            for r in xrange(count):
                e = index[r]
                ishead[e[5]] = ishead[e[6]] = 0
            return [self.node(r) for r in xrange(count) if ishead[r]]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = set((startrev,))
        heads = set((startrev,))

        parentrevs = self.parentrevs
        for r in xrange(startrev + 1, len(self)):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                    heads.add(r)
                if p in heads and p not in stoprevs:
                    heads.remove(p)

        return [self.node(r) for r in heads]

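    # Example (illustrative sketch, not part of the original file): with no
    # arguments the fast path above is taken; every rev that occurs as a
    # parent is crossed off and whatever remains has no children:
    #
    #   tips = rl.heads()  # all heads of the revlog
    #
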
    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, len(self)):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

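    # Example (illustrative sketch, not part of the original file): only
    # parent links are stored in the index, so children() must scan every
    # revision after the one given:
    #
    #   kids = rl.children(n)  # direct children of node n, possibly []
    #
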
    def _match(self, id):
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

    def _partialmatch(self, id):
        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                bin_id = bin(id[:l * 2])
                nl = [n for n in self.nodemap if n[:l] == bin_id]
                nl = [n for n in nl if hex(n).startswith(id)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

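    # Example (illustrative sketch, not part of the original file): lookup()
    # accepts the identifier forms a user might type; 'abc123de' below is a
    # hypothetical unambiguous hex prefix:
    #
    #   rl.lookup(0)           # revision number
    #   rl.lookup('-1')        # str(rev), negative counts from the end
    #   rl.lookup('abc123de')  # prefix resolved by _partialmatch
    #
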
    def cmp(self, node, text):
        """compare text with a given file revision; return True if they
        differ"""
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def _addchunk(self, offset, data):
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _prereadsize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _loadchunk(self, offset, length):
        if self._inline:
            df = self.opener(self.indexfile)
        else:
            df = self.opener(self.datafile)

        readahead = max(65536, length)
        df.seek(offset)
        d = df.read(readahead)
        self._addchunk(offset, d)
        if readahead > length:
            return d[:length]
        return d

    def _getchunk(self, offset, length):
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return d[cachestart:cacheend]

        return self._loadchunk(offset, length)

    def _chunkraw(self, startrev, endrev):
        start = self.start(startrev)
        length = self.end(endrev) - start
        if self._inline:
            start += (startrev + 1) * self._io.size
        return self._getchunk(start, length)

    def _chunk(self, rev):
        return decompress(self._chunkraw(rev, rev))

    def _chunkclear(self):
        self._chunkcache = (0, '')

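    # Example (illustrative sketch, not part of the original file): the chunk
    # cache is one (offset, data) window over the file, and _loadchunk reads
    # ahead at least 64k, so walking a delta chain mostly hits the cache:
    #
    #   raw = rl._chunkraw(r, r)  # compressed bytes for rev r
    #   delta = rl._chunk(r)      # the same bytes, decompressed
    #
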
    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
            return self._chunk(rev2)

        return mdiff.textdiff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))

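    # Example (illustrative sketch, not part of the original file): when
    # rev2 directly follows rev1 in the same delta chain, the stored chunk
    # already is the delta, so nothing has to be reconstructed:
    #
    #   d = rl.revdiff(r, r + 1)  # often just the chunk read off disk
    #
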
    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid:
            return ""
        if self._cache and self._cache[0] == node:
            return self._cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        # check rev flags
        if self.index[rev][0] & 0xFFFF:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.index[rev][0] & 0xFFFF))

        # do we have useful data cached?
        if self._cache and self._cache[1] >= base and self._cache[1] < rev:
            base = self._cache[1]
            text = self._cache[2]

        self._loadindex(base, rev + 1)
        self._chunkraw(base, rev)
        if text is None:
            text = self._chunk(base)

        bins = [self._chunk(r) for r in xrange(base + 1, rev + 1)]
        text = mdiff.patches(text, bins)
        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.indexfile, rev))

        self._cache = (node, rev, text)
        return text

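    # Example (illustrative sketch, not part of the original file):
    # revision() rebuilds a text by patching from the chain base up to the
    # requested rev, then checks the result against the node hash:
    #
    #   text = rl.revision(n)  # raises RevlogError on corrupt data
    #
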
    def checkinlinesize(self, tr, fp=None):
        if not self._inline or (self.start(-2) + self.length(-2)) < 131072:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        df = self.opener(self.datafile, 'w')
        try:
            for r in self:
                df.write(self._chunkraw(r, r))
        finally:
            df.close()

        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

    def addrevision(self, text, transaction, link, p1, p2, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a+")
        try:
            return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
        node = hash(text, p1, p2)
        if node in self.nodemap:
            return node

        curr = len(self)
        prev = curr - 1
        base = self.base(prev)
        offset = self.end(prev)

        if curr:
            if not d:
                ptext = self.revision(self.node(prev))
                d = mdiff.textdiff(ptext, text)
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = l + offset - self.start(base)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not curr or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = curr

        e = (offset_type(offset, 0), l, len(text),
             base, link, self.rev(p1), self.rev(p2), node)
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        if type(text) == str: # only accept immutable objects
            self._cache = (node, curr, text)
        return node

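    # Example (illustrative sketch, not part of the original file): the
    # 'dist > len(text) * 2' test above bounds delta chains: once rebuilding
    # a text would read more than twice its size, a full version is stored
    # and a new chain starts. Assuming an open transaction 'tr':
    #
    #   n = rl.addrevision(text, tr, linkrev, p1node, p2node)
    #
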
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        # fast path, check if it is a descendant
        a, b = self.rev(a), self.rev(b)
        start, end = sorted((a, b))
        for i in self.descendants(start):
            if i == end:
                return self.node(start)
            elif i > end:
                break

        def parents(rev):
            return [p for p in self.parentrevs(rev) if p != nullrev]

        c = ancestor.ancestor(a, b, parents)
        if c is None:
            return nullid

        return self.node(c)

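    # Example (illustrative sketch, not part of the original file): this is
    # the routine a merge uses to pick its common base:
    #
    #   base = rl.ancestor(n1, n2)  # nullid if the nodes share no history
    #
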
    def group(self, nodelist, lookup, infocollect=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. The parent is parent[0].
        """

        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        for d in xrange(len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            if a == -1:
                d = self.revision(nb)
                meta += mdiff.trivialdiffheader(len(d))
            else:
                d = self.revdiff(a, b)
            yield changegroup.chunkheader(len(meta) + len(d))
            yield meta
            if len(d) > 2**20:
                pos = 0
                while pos < len(d):
                    pos2 = pos + 2 ** 18
                    yield d[pos:pos2]
                    pos = pos2
            else:
                yield d

        yield changegroup.closechunk()

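    # Example (illustrative sketch, not part of the original file): each
    # delta is preceded by a length prefix and an 80-byte metadata block
    # (node, p1, p2, linked changeset); deltas over 1M are yielded in 256k
    # slices so the consumer never buffers a huge string. 'wire' below is a
    # hypothetical sink and 'lookupfn' a hypothetical node -> cs mapper:
    #
    #   for chunk in rl.group(nodes, lookupfn):
    #       wire.write(chunk)
    #
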
    def addgroup(self, revs, linkmapper, transaction):
        """
        add a delta group

        Given a set of deltas, add them to the revision log. The
        first delta is against its parent, which should be in our
        log; the rest are against the previous delta.
        """

        # track the base of the current delta log
        r = len(self)
        t = r - 1
        node = None

        base = prev = nullrev
        start = end = textlen = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        try:
            # loop through our set of deltas
            chain = None
            for chunk in revs:
                node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue
                delta = buffer(chunk, 80)
                del chunk

                for p in (p1, p2):
                    if not p in self.nodemap:
                        raise LookupError(p, self.indexfile, _('unknown parent'))

                if not chain:
                    # retrieve the parent revision of the delta chain
                    chain = p1
                    if not chain in self.nodemap:
                        raise LookupError(chain, self.indexfile, _('unknown base'))

                # full versions are inserted when the needed deltas become
                # comparable to the uncompressed text or when the previous
                # version is not the one we have a delta against. We use
                # the size of the previous full rev as a proxy for the
                # current size.

                if chain == prev:
                    cdelta = compress(delta)
                    cdeltalen = len(cdelta[0]) + len(cdelta[1])
                    textlen = mdiff.patchedsize(textlen, delta)

                if chain != prev or (end - start + cdeltalen) > textlen * 2:
                    # flush our writes here so we can read it in revision
                    if dfh:
                        dfh.flush()
                    ifh.flush()
                    text = self.revision(chain)
                    if len(text) == 0:
                        # skip over trivial delta header
                        text = buffer(delta, 12)
                    else:
                        text = mdiff.patches(text, [delta])
                    del delta
                    chk = self._addrevision(text, transaction, link, p1, p2, None,
                                            ifh, dfh)
                    if not dfh and not self._inline:
                        # addrevision switched from inline to conventional
                        # reopen the index
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                    if chk != node:
                        raise RevlogError(_("consistency error adding group"))
                    textlen = len(text)
                else:
                    e = (offset_type(end, 0), cdeltalen, textlen, base,
                         link, self.rev(p1), self.rev(p2), node)
                    self.index.insert(-1, e)
                    self.nodemap[node] = r
                    entry = self._io.packentry(e, self.node, self.version, r)
                    if self._inline:
                        ifh.write(entry)
                        ifh.write(cdelta[0])
                        ifh.write(cdelta[1])
                        self.checkinlinesize(transaction, ifh)
                        if not self._inline:
                            dfh = self.opener(self.datafile, "a")
                            ifh = self.opener(self.indexfile, "a")
                    else:
                        dfh.write(cdelta[0])
                        dfh.write(cdelta[1])
                        ifh.write(entry)

                t, r, chain, prev = r, r + 1, node, node
                base = self.base(t)
                start = self.start(base)
                end = self.end(t)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return node

    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        if isinstance(self.index, lazyindex):
            self._loadindexmap()

        for rev in self:
            if self.index[rev][4] >= minlink:
                break
        else:
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res