store: change handling of decoding errors
Matt Mackall
r6900:def492d1 default
@@ -1,2088 +1,2088 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context, weakref
import lock, transaction, stat, errno, ui, store
import os, revlog, time, util, extensions, hook, inspect
import match as match_
import merge as merge_

class localrepository(repo.repository):
    capabilities = util.set(('lookup', 'changegroupsubset'))
    supported = ('revlogv1', 'store')

    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = []
            try:
                requirements = self.opener("requires").read().splitlines()
                for r in requirements:
                    if r not in self.supported:
                        raise repo.RepoError(_("requirement '%s' not supported") % r)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.store = store.store(requirements, self.path, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

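    # Editor's note -- a hedged usage sketch, not part of this changeset:
    # callers normally reach this constructor through mercurial.hg rather
    # than instantiating localrepository directly. Assuming a local path:
    #
    #   from mercurial import ui as ui_, hg
    #   repo = hg.repository(ui_.ui(), '/path/to/repo')           # open
    #   repo = hg.repository(ui_.ui(), '/path/to/new', create=1)  # 'hg init'
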
    def __getattr__(self, name):
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name

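    # Editor's note: __getattr__ implements lazy, compute-once attributes.
    # The first access stores the object in the instance dict, so later
    # lookups never reach __getattr__ again. A minimal sketch of the idiom:
    #
    #   class lazy(object):
    #       def __getattr__(self, name):
    #           if name == 'data':
    #               self.data = expensive_load()  # cached on the instance
    #               return self.data
    #           raise AttributeError(name)
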
    def __getitem__(self, changeid):
        if changeid == None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, parent=None,
             extra={}):
        use_dirstate = parent is None

        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetags(fp, names, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(names, node, message, local, user, date)

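    # Editor's usage sketch (assumed caller, matching the docstring above):
    #
    #   node = repo.changelog.tip()
    #   repo.tag('v1.0', node, 'Added tag v1.0 for changeset %s' % short(node),
    #            False, 'me <me@example.com>', None)
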
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supersedes us OR
                #  mutual supersedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache

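    # Editor's sketch: tags() maps tag name -> 20-byte binary node; hex()
    # from the node module renders it for display:
    #
    #   for name, n in repo.tags().items():
    #       print name, hex(n)
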
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self[node]
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in util.sort(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache

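    # Editor's sketch: branchtags() maps branch name -> tip-most node on
    # that branch, rebuilt incrementally from the cached tip:
    #
    #   for branch, n in repo.branchtags().items():
    #       print branch, repo.changelog.rev(n)
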
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

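    # Editor's note on the branch.cache format written above: the first
    # line checkpoints the tip the cache was computed against, and each
    # following line records one branch head, e.g.:
    #
    #   <40-hex tip node> <tiprev>
    #   <40-hex node> default
    #   <40-hex node> stable
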
    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self[r]
            b = c.branch()
            partial[b] = c.node()

    def lookup(self, key):
        if key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)

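    # Editor's sketch of the resolution order implemented above:
    #
    #   repo.lookup('.')         # first dirstate parent
    #   repo.lookup('null')      # nullid
    #   repo.lookup('42')        # rev number or full hex, via _match()
    #   repo.lookup('v1.0')      # a tag, via tags()
    #   repo.lookup('default')   # a branch, via branchtags()
    #   repo.lookup('def492d1')  # unambiguous node prefix, via _partialmatch()
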
    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

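    # Editor's sketch: the pattern/command pairs come from hgrc sections
    # named after the filter ("encode" feeds wread, "decode" feeds wwrite);
    # a command is run as a shell pipe unless it names a registered data
    # filter. A hypothetical configuration, with assumed external commands:
    #
    #   [encode]
    #   **.txt = tr -d '\r'
    #   [decode]
    #   **.txt = unix2dos
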
    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

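    # Editor's note: aftertrans(renames) is what turns a finished journal
    # into rollback state -- on close, journal -> undo, journal.dirstate ->
    # undo.dirstate and journal.branch -> undo.branch, which is exactly
    # the state rollback() below consumes.
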
    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

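    # Editor's usage sketch: locks are cached via weakrefs, so nested
    # callers share the live lock and release it by dropping references,
    # the same idiom used throughout this file:
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       # ... modify the store ...
    #   finally:
    #       del lock, wlock
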
    def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fn = fctx.path()
        t = fctx.data()
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = fctx.renamed()
        if cp and cp[0] != fn:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            #    0 --- 1 --- 3   rev1 changes file foo
            #      \       /     rev2 renames foo to bar and changes it
            #       \- 2 -/      rev3 should have bar with all changes and
            #                         should record that bar descends from
            #                         bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            #    0 --- 1 --- 3   rev4 reverts the content change from rev2
            #      \       /     merging rev3 and rev4 should use bar@rev2
            #       \- 2 --- 4   as the merge base
            #

            cf = cp[0]
            cr = manifest1.get(cf)
            nfp = fp2

            if manifest2: # branch merge
                if fp2 == nullid: # copied on remote side
                    if fp1 != nullid or cf in manifest2:
                        cr = manifest2[cf]
                        nfp = fp1

            # find source in nearest ancestor if we've lost track
            if not cr:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fn, cf))
                for a in self['.'].ancestors():
                    if cf in a:
                        cr = a[cf].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
            meta["copy"] = cf
            meta["copyrev"] = hex(cr)
            fp1, fp2 = nullid, nfp
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

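    # Editor's note: when a rename is recorded above, the filelog entry
    # carries the copy source in its metadata, e.g.
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-hex filenode of foo>'}
    #
    # with fp1 forced to nullid so readers know to look up the copy data
    # instead of the first parent.
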
    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

    def commit(self, files=None, text="", user=None, date=None,
               match=None, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = None
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            use_dirstate = (p1 is None) # not rawcommit

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                if (not force and p2 != nullid and
                    (match and (match.files() or match.anypats()))):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))

                if files:
                    modified, removed = [], []
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            modified.append(f)
                        elif s == 'r':
                            removed.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                    changes = [modified, [], removed, [], []]
                else:
                    changes = self.status(match=match)
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)
                changes = [files, [], [], [], []]

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))
            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            return self._commitctx(wctx, force, force_editor, empty_ok,
                                   use_dirstate, update_dirstate)
        finally:
            del lock, wlock

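    # Editor's usage sketch (the use_dirstate path above: commit whatever
    # the working directory reports as changed):
    #
    #   n = repo.commit(text='a commit message',
    #                   user='me <me@example.com>')
    #   if n is None:
    #       repo.ui.status('nothing changed\n')
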
790 def commitctx(self, ctx):
790 def commitctx(self, ctx):
791 wlock = lock = None
791 wlock = lock = None
792 try:
792 try:
793 wlock = self.wlock()
793 wlock = self.wlock()
794 lock = self.lock()
794 lock = self.lock()
795 return self._commitctx(ctx, force=True, force_editor=False,
795 return self._commitctx(ctx, force=True, force_editor=False,
796 empty_ok=True, use_dirstate=False,
796 empty_ok=True, use_dirstate=False,
797 update_dirstate=False)
797 update_dirstate=False)
798 finally:
798 finally:
799 del lock, wlock
799 del lock, wlock
800
800
801 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
801 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
802 use_dirstate=True, update_dirstate=True):
802 use_dirstate=True, update_dirstate=True):
803 tr = None
803 tr = None
804 valid = 0 # don't save the dirstate if this isn't set
804 valid = 0 # don't save the dirstate if this isn't set
805 try:
805 try:
806 commit = util.sort(wctx.modified() + wctx.added())
806 commit = util.sort(wctx.modified() + wctx.added())
807 remove = wctx.removed()
807 remove = wctx.removed()
808 extra = wctx.extra().copy()
808 extra = wctx.extra().copy()
809 branchname = extra['branch']
809 branchname = extra['branch']
810 user = wctx.user()
810 user = wctx.user()
811 text = wctx.description()
811 text = wctx.description()
812
812
813 p1, p2 = [p.node() for p in wctx.parents()]
813 p1, p2 = [p.node() for p in wctx.parents()]
814 c1 = self.changelog.read(p1)
814 c1 = self.changelog.read(p1)
815 c2 = self.changelog.read(p2)
815 c2 = self.changelog.read(p2)
816 m1 = self.manifest.read(c1[0]).copy()
816 m1 = self.manifest.read(c1[0]).copy()
817 m2 = self.manifest.read(c2[0])
817 m2 = self.manifest.read(c2[0])
818
818
819 if use_dirstate:
819 if use_dirstate:
820 oldname = c1[5].get("branch") # stored in UTF-8
820 oldname = c1[5].get("branch") # stored in UTF-8
821 if (not commit and not remove and not force and p2 == nullid
821 if (not commit and not remove and not force and p2 == nullid
822 and branchname == oldname):
822 and branchname == oldname):
823 self.ui.status(_("nothing changed\n"))
823 self.ui.status(_("nothing changed\n"))
824 return None
824 return None
825
825
826 xp1 = hex(p1)
826 xp1 = hex(p1)
827 if p2 == nullid: xp2 = ''
827 if p2 == nullid: xp2 = ''
828 else: xp2 = hex(p2)
828 else: xp2 = hex(p2)
829
829
830 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
830 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
831
831
832 tr = self.transaction()
832 tr = self.transaction()
833 trp = weakref.proxy(tr)
833 trp = weakref.proxy(tr)
834
834
835 # check in files
835 # check in files
836 new = {}
836 new = {}
837 changed = []
837 changed = []
838 linkrev = len(self)
838 linkrev = len(self)
839 for f in commit:
839 for f in commit:
840 self.ui.note(f + "\n")
840 self.ui.note(f + "\n")
841 try:
841 try:
842 fctx = wctx.filectx(f)
842 fctx = wctx.filectx(f)
843 newflags = fctx.flags()
843 newflags = fctx.flags()
844 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
844 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
845 if ((not changed or changed[-1] != f) and
845 if ((not changed or changed[-1] != f) and
846 m2.get(f) != new[f]):
846 m2.get(f) != new[f]):
847 # mention the file in the changelog if some
847 # mention the file in the changelog if some
848 # flag changed, even if there was no content
848 # flag changed, even if there was no content
849 # change.
849 # change.
850 if m1.flags(f) != newflags:
850 if m1.flags(f) != newflags:
851 changed.append(f)
851 changed.append(f)
852 m1.set(f, newflags)
852 m1.set(f, newflags)
853 if use_dirstate:
853 if use_dirstate:
854 self.dirstate.normal(f)
854 self.dirstate.normal(f)
855
855
856 except (OSError, IOError):
856 except (OSError, IOError):
857 if use_dirstate:
857 if use_dirstate:
858 self.ui.warn(_("trouble committing %s!\n") % f)
858 self.ui.warn(_("trouble committing %s!\n") % f)
859 raise
859 raise
860 else:
860 else:
861 remove.append(f)
861 remove.append(f)
862
862
863 # update manifest
863 # update manifest
864 m1.update(new)
864 m1.update(new)
865 removed = []
865 removed = []
866
866
867 for f in util.sort(remove):
867 for f in util.sort(remove):
868 if f in m1:
868 if f in m1:
869 del m1[f]
869 del m1[f]
870 removed.append(f)
870 removed.append(f)
871 elif f in m2:
871 elif f in m2:
872 removed.append(f)
872 removed.append(f)
873 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
873 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
874 (new, removed))
874 (new, removed))
875
875
876 # add changeset
876 # add changeset
877 if (not empty_ok and not text) or force_editor:
877 if (not empty_ok and not text) or force_editor:
878 edittext = []
878 edittext = []
879 if text:
879 if text:
880 edittext.append(text)
880 edittext.append(text)
881 edittext.append("")
881 edittext.append("")
882 edittext.append(_("HG: Enter commit message."
882 edittext.append(_("HG: Enter commit message."
883 " Lines beginning with 'HG:' are removed."))
883 " Lines beginning with 'HG:' are removed."))
884 edittext.append("HG: --")
884 edittext.append("HG: --")
885 edittext.append("HG: user: %s" % user)
885 edittext.append("HG: user: %s" % user)
886 if p2 != nullid:
886 if p2 != nullid:
887 edittext.append("HG: branch merge")
887 edittext.append("HG: branch merge")
888 if branchname:
888 if branchname:
889 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
889 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
890 edittext.extend(["HG: changed %s" % f for f in changed])
890 edittext.extend(["HG: changed %s" % f for f in changed])
891 edittext.extend(["HG: removed %s" % f for f in removed])
891 edittext.extend(["HG: removed %s" % f for f in removed])
892 if not changed and not remove:
892 if not changed and not remove:
893 edittext.append("HG: no files changed")
893 edittext.append("HG: no files changed")
894 edittext.append("")
894 edittext.append("")
895 # run editor in the repository root
895 # run editor in the repository root
896 olddir = os.getcwd()
896 olddir = os.getcwd()
897 os.chdir(self.root)
897 os.chdir(self.root)
898 text = self.ui.edit("\n".join(edittext), user)
898 text = self.ui.edit("\n".join(edittext), user)
899 os.chdir(olddir)
899 os.chdir(olddir)
900
900
901 lines = [line.rstrip() for line in text.rstrip().splitlines()]
901 lines = [line.rstrip() for line in text.rstrip().splitlines()]
902 while lines and not lines[0]:
902 while lines and not lines[0]:
903 del lines[0]
903 del lines[0]
904 if not lines and use_dirstate:
904 if not lines and use_dirstate:
905 raise util.Abort(_("empty commit message"))
905 raise util.Abort(_("empty commit message"))
906 text = '\n'.join(lines)
906 text = '\n'.join(lines)
907
907
908 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
908 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
909 user, wctx.date(), extra)
909 user, wctx.date(), extra)
910 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
910 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
911 parent2=xp2)
911 parent2=xp2)
912 tr.close()
912 tr.close()
913
913
914 if self.branchcache:
914 if self.branchcache:
915 self.branchtags()
915 self.branchtags()
916
916
917 if use_dirstate or update_dirstate:
917 if use_dirstate or update_dirstate:
918 self.dirstate.setparents(n)
918 self.dirstate.setparents(n)
919 if use_dirstate:
919 if use_dirstate:
920 for f in removed:
920 for f in removed:
921 self.dirstate.forget(f)
921 self.dirstate.forget(f)
922 valid = 1 # our dirstate updates are complete
922 valid = 1 # our dirstate updates are complete
923
923
924 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
924 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
925 return n
925 return n
926 finally:
926 finally:
927 if not valid: # don't save our updated dirstate
927 if not valid: # don't save our updated dirstate
928 self.dirstate.invalidate()
928 self.dirstate.invalidate()
929 del tr
929 del tr
930
930
931 def walk(self, match, node=None):
931 def walk(self, match, node=None):
932 '''
932 '''
933 walk recursively through the directory tree or a given
933 walk recursively through the directory tree or a given
934 changeset, finding all files matched by the match
934 changeset, finding all files matched by the match
935 function
935 function
936 '''
936 '''
937 return self[node].walk(match)
937 return self[node].walk(match)
938
938
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]
        working = ctx2 == self[None]
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except lock.LockException:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

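    # Illustrative usage sketch, not part of the original module: assuming
    # `repo` is a localrepository instance, status() always returns the seven
    # lists above in this order, each sorted:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)
    #   for f in modified:
    #       repo.ui.write("M %s\n" % f)
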
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                   % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

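    # A hedged sketch of the dirstate states consulted above (restating the
    # code, not adding new behavior): 'a' is added, 'm' merged, 'n' normal
    # and 'r' removed. A hypothetical caller passes repo-relative paths and
    # inspects the rejects:
    #
    #   rejected = repo.add(['README', 'src/new_module.py'])
    #   if rejected:
    #       repo.ui.warn("not added: %s\n" % ' '.join(rejected))
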
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in util.sort(heads)]

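    # A minimal worked example of the sort trick above (illustrative only):
    # negating the revision number makes an ascending sort yield heads in
    # rev-descending order. Heads at revs [3, 7, 5] become
    # [(-3, n3), (-7, n7), (-5, n5)], which sorts to
    # [(-7, n7), (-5, n5), (-3, n3)], i.e. the nodes for revs 7, 5, 3.
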
    def branchheads(self, branch=None, start=None):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self[rev].branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

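    # Worked example for the scan above, on hypothetical revisions (not from
    # the original source): suppose the branch holds revs 0-1-2 and a second
    # head 3 also forked from 1, with 3 the branch tip. The walk starts with
    # heads = [3] and ancestors = {1}. Rev 2 is not in ancestors and is on
    # the branch, so it becomes a second head and contributes its parent 1.
    # Rev 1 is in ancestors, so it is swapped for its own parents. The
    # result is heads for revs [3, 2].
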
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

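    # Sketch of the sampling above (illustrative, hypothetical node names):
    # for each (top, bottom) pair the walk keeps the ancestors at distances
    # 1, 2, 4, 8, ... below top, since `i == f` only at powers of two and f
    # then doubles. On a linear chain top=n9 down to bottom=n0 the result
    # is [n8, n7, n5, n1]: the nodes 1, 2, 4 and 8 steps below the top.
    # This exponential spacing is what lets findincoming binary-search a
    # branch segment with a logarithmic number of round trips.
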
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

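    # Protocol sketch (illustrative only; `other` is a hypothetical remote
    # opened elsewhere): discovery costs one heads() call, then branches()
    # calls in batches of ten, then between() calls that each halve the
    # unresolved segment, as described above:
    #
    #   heads = other.heads()                         # wire call 1
    #   fetch = repo.findincoming(other, heads=heads) # branches()/between()
    #   # fetch now holds the roots of the changesets missing locally
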
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

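    # Hedged usage sketch (not from this file; assumes the peer was opened
    # via hg.repository, and `somenode` is a hypothetical binary node id):
    #
    #   other = hg.repository(repo.ui, 'http://example.com/repo')
    #   repo.pull(other)                   # pull everything missing
    #   repo.pull(other, heads=[somenode]) # partial pull; requires the
    #                                      # 'changegroupsubset' capability
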
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

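    # Illustrative reading of the head check above (hypothetical heads, not
    # original code): if the local repo has heads {A, B} and the remote has
    # only {A'}, with A descended from A', then pushing without force would
    # leave B as a second remote head, so prepush returns (None, 0) and the
    # caller gives up. With force=True the changegroup is built regardless
    # and (cg, remote_heads) is handed to the transport layer.
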
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

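    # Shape of the extranodes argument, restating the docstring as a sketch
    # (the node values here are hypothetical placeholders):
    #
    #   extranodes = {
    #       1: [(mnode, clnode)],             # key 1: extra manifest nodes
    #       'foo/bar.txt': [(fnode, clnode)], # filename: extra filelog nodes
    #   }
    #   cg = repo.changegroupsubset(bases, heads, 'push',
    #                               extranodes=extranodes)
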
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                n = log.node(r)
                if log.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

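    # Illustrative note (not part of this class): both changegroup
    # generators above emit length-prefixed chunks.  Assuming the framing
    # used by changegroup.chunkheader()/closechunk() -- a 4-byte
    # big-endian length that counts the header itself, with a length of
    # zero marking the end of a group -- a minimal reader looks like:
    #
    #   import struct
    #
    #   def readchunks(stream):
    #       while True:
    #           header = stream.read(4)
    #           if len(header) < 4:
    #               break                         # truncated stream
    #           length = struct.unpack(">l", header)[0]
    #           if length <= 4:
    #               break                         # closechunk(): group done
    #           yield stream.read(length - 4)     # chunk payload
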
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


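    # How a caller might read addchangegroup()'s return value, following
    # the docstring above (illustrative sketch, not part of this class):
    #
    #   ret = repo.addchangegroup(source, 'pull', url)
    #   if ret == 0:
    #       msg = "nothing changed"
    #   elif ret > 1:
    #       msg = "%d new heads" % (ret - 1)
    #   elif ret < 0:
    #       msg = "%d heads removed" % (-ret - 1)
    #   else:                                  # ret == 1
    #       msg = "head count unchanged"
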
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

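    # Sketch of the producer side of the protocol stream_in() consumes
    # (illustrative; the real server is remote.stream_out()).  A status
    # line, then "<file count> <byte count>", then for each file a
    # "name\0size" line followed by exactly `size` bytes of raw data:
    #
    #   def stream_out(files):                 # files: [(name, size, data)]
    #       yield '0\n'                        # 0 = stream permitted
    #       total = sum(size for name, size, data in files)
    #       yield '%d %d\n' % (len(files), total)
    #       for name, size, data in files:
    #           yield '%s\0%d\n' % (name, size)
    #           yield data
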
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

    def storefiles(self):
        '''get all *.i and *.d files in the store

        Returns (list of (filename, size), total_bytes)'''

        lock = None
        try:
            self.ui.debug('scanning\n')
            entries = []
            total_bytes = 0
            # get consistent snapshot of repo, lock during scan
            lock = self.lock()
-            for name, size in self.store.walk():
+            for name, ename, size in self.store.walk():
                entries.append((name, size))
                total_bytes += size
            return entries, total_bytes
        finally:
            del lock
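
    # Illustrative use of storefiles() (not part of this class): these
    # are exactly the figures a streaming-clone server reports before
    # sending file data.
    #
    #   entries, total = repo.storefiles()
    #   repo.ui.status("%d files, %s of data\n"
    #                  % (len(entries), util.bytecount(total)))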

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
@@ -1,117 +1,116 @@
# store.py - repository store handling for Mercurial
#
# Copyright 2008 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

-from i18n import _
import os, stat, osutil, util

def _buildencodefun():
    e = '_'
    win_reserved = [ord(x) for x in '\\:*?"<>|']
    cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
    for x in (range(32) + range(126, 256) + win_reserved):
        cmap[chr(x)] = "~%02x" % x
    for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
        cmap[chr(x)] = e + chr(x).lower()
    dmap = {}
    for k, v in cmap.iteritems():
        dmap[v] = k
    def decode(s):
        i = 0
        while i < len(s):
            for l in xrange(1, 4):
                try:
                    yield dmap[s[i:i+l]]
                    i += l
                    break
                except KeyError:
                    pass
            else:
                raise KeyError
    return (lambda s: "".join([cmap[c] for c in s]),
            lambda s: "".join(list(decode(s))))

encodefilename, decodefilename = _buildencodefun()
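
# Illustrative round-trip of the scheme built above: uppercase letters
# and '_' are escaped with '_', and control/Windows-reserved bytes
# become '~%02x' (the filename here is hypothetical):
assert encodefilename('data/Foo:BAR.i') == 'data/_foo~3a_b_a_r.i'
assert decodefilename('data/_foo~3a_b_a_r.i') == 'data/Foo:BAR.i'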

def _calcmode(path):
    try:
        # files in .hg/ will be created using this mode
        mode = os.stat(path).st_mode
        # avoid some useless chmods
        if (0777 & ~util._umask) == (0777 & mode):
            mode = None
    except OSError:
        mode = None
    return mode

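# Worked example for _calcmode (illustrative, assuming umask 022):
#
#   >>> oct(0777 & ~0022)
#   '0755'
#
# A store directory already at mode 0755 therefore yields mode = None
# above, and the useless chmod calls are skipped entirely.
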
class basicstore:
    '''base class for local repository stores'''
    def __init__(self, path, opener):
        self.path = path
        self.createmode = _calcmode(path)
        self.opener = opener(self.path)
        self.opener.createmode = self.createmode

    def join(self, f):
        return os.path.join(self.path, f)

    def _walk(self, relpath, recurse):
-        '''yields (filename, size)'''
+        '''yields (unencoded, encoded, size)'''
        path = os.path.join(self.path, relpath)
        striplen = len(self.path) + len(os.sep)
        prefix = path[striplen:]
        l = []
        if os.path.isdir(path):
            visit = [path]
            while visit:
                p = visit.pop()
                for f, kind, st in osutil.listdir(p, stat=True):
                    fp = os.path.join(p, f)
                    if kind == stat.S_IFREG and f[-2:] in ('.d', '.i'):
-                        l.append((util.pconvert(fp[striplen:]), st.st_size))
+                        n = util.pconvert(fp[striplen:])
+                        l.append((n, n, st.st_size))
                    elif kind == stat.S_IFDIR and recurse:
                        visit.append(fp)
        return util.sort(l)

-    def datafiles(self, reporterror=None):
+    def datafiles(self):
        return self._walk('data', True)

    def walk(self):
-        '''yields (direncoded filename, size)'''
+        '''yields (unencoded, encoded, size)'''
        # yield data files first
        for x in self.datafiles():
            yield x
        # yield manifest before changelog
        meta = self._walk('', False)
        meta.reverse()
        for x in meta:
            yield x

class encodedstore(basicstore):
    def __init__(self, path, opener):
        self.path = os.path.join(path, 'store')
        self.createmode = _calcmode(self.path)
        self.encodefn = encodefilename
        op = opener(self.path)
        op.createmode = self.createmode
        self.opener = lambda f, *args, **kw: op(self.encodefn(f), *args, **kw)

-    def datafiles(self, reporterror=None):
-        for f, size in self._walk('data', True):
-            try:
-                yield decodefilename(f), size
-            except KeyError:
-                if not reporterror:
-                    raise
-                reporterror(_("cannot decode filename '%s'") % f)
+    def datafiles(self):
+        for a, b, size in self._walk('data', True):
+            try:
+                a = decodefilename(a)
+            except KeyError:
+                a = None
+            yield a, b, size

    def join(self, f):
        return os.path.join(self.path, self.encodefn(f))

def store(requirements, path, opener):
    if 'store' in requirements:
        return encodedstore(path, opener)
    return basicstore(path, opener)
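
# Minimal sketch of a consumer under the new datafiles() contract
# (illustrative helper, not part of this module): undecodable names now
# arrive as (None, encoded, size) instead of raising KeyError or firing
# a callback, so each caller chooses its own reporting -- compare the
# verify.py hunk below.
def reportundecodable(store, write):
    for name, ename, size in store.datafiles():
        if name is None:
            write("cannot decode filename %r\n" % ename)
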
@@ -1,233 +1,235 @@
# verify.py - repository integrity checking for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import nullid, short
from i18n import _
import revlog, util

def verify(repo):
    lock = repo.lock()
    try:
        return _verify(repo)
    finally:
        del lock

def _verify(repo):
    mflinkrevs = {}
    filelinkrevs = {}
    filenodes = {}
    revisions = 0
    badrevs = {}
    errors = [0]
    warnings = [0]
    ui = repo.ui
    cl = repo.changelog
    mf = repo.manifest

    def err(linkrev, msg, filename=None):
        if linkrev != None:
            badrevs[linkrev] = True
        else:
            linkrev = '?'
        msg = "%s: %s" % (linkrev, msg)
        if filename:
            msg = "%s@%s" % (filename, msg)
        ui.warn(" " + msg + "\n")
        errors[0] += 1

    def exc(linkrev, msg, inst, filename=None):
        if isinstance(inst, KeyboardInterrupt):
            ui.warn(_("interrupted"))
            raise
        err(linkrev, "%s: %s" % (msg, inst), filename)

    def warn(msg):
        ui.warn(msg + "\n")
        warnings[0] += 1

    def checklog(obj, name):
        if not len(obj) and (havecl or havemf):
            err(0, _("empty or missing %s") % name)
            return

        d = obj.checksize()
        if d[0]:
            err(None, _("data length off by %d bytes") % d[0], name)
        if d[1]:
            err(None, _("index contains %d extra bytes") % d[1], name)

        if obj.version != revlog.REVLOGV0:
            if not revlogv1:
                warn(_("warning: `%s' uses revlog format 1") % name)
        elif revlogv1:
            warn(_("warning: `%s' uses revlog format 0") % name)

    def checkentry(obj, i, node, seen, linkrevs, f):
        lr = obj.linkrev(node)
        if lr < 0 or (havecl and lr not in linkrevs):
            t = "unexpected"
            if lr < 0 or lr >= len(cl):
                t = "nonexistent"
            err(None, _("rev %d points to %s changeset %d") % (i, t, lr), f)
            if linkrevs:
                warn(_(" (expected %s)") % " ".join(map(str, linkrevs)))
            lr = None # can't be trusted

        try:
            p1, p2 = obj.parents(node)
            if p1 not in seen and p1 != nullid:
                err(lr, _("unknown parent 1 %s of %s") %
                    (short(p1), short(node)), f)
            if p2 not in seen and p2 != nullid:
                err(lr, _("unknown parent 2 %s of %s") %
                    (short(p2), short(node)), f)
        except Exception, inst:
            exc(lr, _("checking parents of %s") % short(node), inst, f)

        if node in seen:
            err(lr, _("duplicate revision %d (%d)") % (i, seen[node]), f)
        seen[node] = i
        return lr

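    # The invariant checkentry() enforces, restated (illustrative, not
    # part of this module): every revlog revision must link back to an
    # existing changelog revision, and parents must precede children
    # since revlogs are append-only:
    #
    #   for i in fl:                       # fl: some filelog
    #       n = fl.node(i)
    #       assert 0 <= fl.linkrev(n) < len(cl)
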
    revlogv1 = cl.version != revlog.REVLOGV0
    if ui.verbose or not revlogv1:
        ui.status(_("repository uses revlog format %d\n") %
                  (revlogv1 and 1 or 0))

    havecl = len(cl) > 0
    havemf = len(mf) > 0

    ui.status(_("checking changesets\n"))
    seen = {}
    checklog(cl, "changelog")
    for i in repo:
        n = cl.node(i)
        checkentry(cl, i, n, seen, [i], "changelog")

        try:
            changes = cl.read(n)
            mflinkrevs.setdefault(changes[0], []).append(i)
            for f in changes[3]:
                filelinkrevs.setdefault(f, []).append(i)
        except Exception, inst:
            exc(i, _("unpacking changeset %s") % short(n), inst)

    ui.status(_("checking manifests\n"))
    seen = {}
    checklog(mf, "manifest")
    for i in mf:
        n = mf.node(i)
        lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest")
        if n in mflinkrevs:
            del mflinkrevs[n]

        try:
            for f, fn in mf.readdelta(n).iteritems():
                if not f:
                    err(lr, _("file without name in manifest"))
                elif f != "/dev/null":
                    fns = filenodes.setdefault(f, {})
                    if fn not in fns:
                        fns[fn] = n
        except Exception, inst:
            exc(lr, _("reading manifest delta %s") % short(n), inst)

    ui.status(_("crosschecking files in changesets and manifests\n"))

    if havemf:
        for c, m in util.sort([(c, m) for m in mflinkrevs for c in mflinkrevs[m]]):
            err(c, _("changeset refers to unknown manifest %s") % short(m))
        del mflinkrevs

    for f in util.sort(filelinkrevs):
        if f not in filenodes:
            lr = filelinkrevs[f][0]
            err(lr, _("in changeset but not in manifest"), f)

    if havecl:
        for f in util.sort(filenodes):
            if f not in filelinkrevs:
                try:
                    lr = min([repo.file(f).linkrev(n) for n in filenodes[f]])
                except:
                    lr = None
                err(lr, _("in manifest but not in changeset"), f)

    ui.status(_("checking files\n"))

    storefiles = {}
-    for f, size in repo.store.datafiles(lambda m: err(None, m)):
-        if size > 0:
+    for f, f2, size in repo.store.datafiles():
+        if not f:
+            err(None, _("cannot decode filename '%s'") % f2)
+        elif size > 0:
            storefiles[f] = True

    files = util.sort(util.unique(filenodes.keys() + filelinkrevs.keys()))
    for f in files:
        fl = repo.file(f)

        for ff in fl.files():
            try:
                del storefiles[ff]
            except KeyError:
                err(0, _("missing revlog!"), ff)

        checklog(fl, f)
        seen = {}
        for i in fl:
            revisions += 1
            n = fl.node(i)
            lr = checkentry(fl, i, n, seen, filelinkrevs.get(f, []), f)
            if f in filenodes:
                if havemf and n not in filenodes[f]:
                    err(lr, _("%s not in manifests") % (short(n)), f)
                else:
                    del filenodes[f][n]

            # verify contents
            try:
                t = fl.read(n)
                rp = fl.renamed(n)
                if len(t) != fl.size(i):
                    if not fl._readmeta(n): # ancient copy?
                        err(lr, _("unpacked size is %s, %s expected") %
                            (len(t), fl.size(i)), f)
            except Exception, inst:
                exc(lr, _("unpacking %s") % short(n), inst, f)

            # check renames
            try:
                if rp:
                    fl2 = repo.file(rp[0])
                    if not len(fl2):
                        err(lr, _("empty or missing copy source revlog %s:%s")
                            % (rp[0], short(rp[1])), f)
                    elif rp[1] == nullid:
                        warn(_("%s@%s: copy source revision is nullid %s:%s")
                             % (f, lr, rp[0], short(rp[1])))
                    else:
                        rev = fl2.rev(rp[1])
            except Exception, inst:
                exc(lr, _("checking rename of %s") % short(n), inst, f)

        # cross-check
        if f in filenodes:
            fns = [(mf.linkrev(l), n) for n, l in filenodes[f].items()]
            for lr, node in util.sort(fns):
                err(lr, _("%s in manifests not found") % short(node), f)

    for f in storefiles:
        warn(_("warning: orphan revlog '%s'") % f)

    ui.status(_("%d files, %d changesets, %d total revisions\n") %
              (len(files), len(cl), revisions))
    if warnings[0]:
        ui.warn(_("%d warnings encountered!\n") % warnings[0])
    if errors[0]:
        ui.warn(_("%d integrity errors encountered!\n") % errors[0])
        if badrevs:
            ui.warn(_("(first damaged changeset appears to be %d)\n")
                    % min(badrevs))
        return 1
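
# Minimal driver (illustrative, not part of this module; equivalent in
# spirit to running 'hg verify' on a local repository at `path`):
def demo(path):
    from mercurial import hg, ui
    repo = hg.repository(ui.ui(), path)
    return verify(repo)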