fix pull racing with push/commit (issue1320)...
Benoit Boissinot - r7233:9f0e52e1 default

# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context, weakref
import lock, transaction, stat, errno, ui, store
import os, revlog, time, util, extensions, hook, inspect
import match as match_
import merge as merge_

class localrepository(repo.repository):
    capabilities = util.set(('lookup', 'changegroupsubset'))
    supported = ('revlogv1', 'store', 'fncache')

    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    requirements.append("fncache")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = []
            try:
                requirements = self.opener("requires").read().splitlines()
                for r in requirements:
                    if r not in self.supported:
                        raise repo.RepoError(_("requirement '%s' not supported") % r)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.store = store.store(requirements, self.path, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def __getattr__(self, name):
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError(name)

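    # changelog, manifest and dirstate are created lazily by __getattr__
    # above: the first access stores the real object in the instance dict,
    # so later lookups never reach __getattr__ again. The bare
    # "self.changelog" statement in the manifest branch forces the
    # changelog open first, since that sets self.sopener.defversion
    # before the manifest revlog is created.
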
    def __getitem__(self, changeid):
        if changeid == None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, parent=None,
             extra={}):
        use_dirstate = parent is None

        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetags(fp, names, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

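    # _tag above handles both kinds of tags: local tags go to
    # .hg/localtags in the local charset and touch no history, while
    # global tags are appended to .hgtags in UTF-8 and committed. When a
    # tag being written already exists in the caches, writetags records
    # the old node on the line before the new one, so the newer entry
    # outranks it when tags() later merges the entries.
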
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supersedes us OR
                #  mutual supersedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self[node]
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

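    # _hgtagsnodes visits the heads oldest-first and keeps one
    # (rev, node, fnode) entry per distinct .hgtags filenode, dropping
    # earlier heads whose .hgtags is identical, so tags() reads each
    # version of the file only once and finishes with the tip-most copy.
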
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in util.sort(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self[r]
            b = c.branch()
            partial[b] = c.node()

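    # The branch cache is kept in .hg/branch.cache as a header line of
    # "<tip-hex> <tip-rev>" followed by one "<node-hex> <branch>" line per
    # branch. _readbranchcache throws the cache away whenever the recorded
    # tip no longer matches the repository, and _writebranchcache rewrites
    # it through an atomictemp file so a concurrent reader never sees a
    # half-written cache.
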
    def lookup(self, key):
        if key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)

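    # lookup resolves a key in a fixed order: the working directory parent
    # ('.'), the null revision, an exact changelog match, tag names, branch
    # names, and finally an unambiguous node prefix. A 20-byte binary key
    # is hexlified only so the error message stays printable.
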
    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

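    # transaction() journals store writes in .hg/store/journal and
    # snapshots the dirstate and branch into journal.* files; aftertrans
    # renames journal to undo once the transaction closes, which is
    # exactly what rollback() below replays.
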
    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

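    # Writers in this file consistently take the working directory lock
    # before the store lock (see commit() and rollback()), and both locks
    # are cached through weak references so that nested calls reuse a lock
    # that is already held further up the stack.
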
    def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fn = fctx.path()
        t = fctx.data()
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = fctx.renamed()
        if cp and cp[0] != fn:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cf = cp[0]
            cr = manifest1.get(cf)
            nfp = fp2

            if manifest2: # branch merge
                if fp2 == nullid: # copied on remote side
                    if fp1 != nullid or cf in manifest2:
                        cr = manifest2[cf]
                        nfp = fp1

            # find source in nearest ancestor if we've lost track
            if not cr:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fn, cf))
                for a in self['.'].ancestors():
                    if cf in a:
                        cr = a[cf].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
            meta["copy"] = cf
            meta["copyrev"] = hex(cr)
            fp1, fp2 = nullid, nfp
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

    def commit(self, files=None, text="", user=None, date=None,
               match=None, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = None
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            use_dirstate = (p1 is None) # not rawcommit

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                if (not force and p2 != nullid and
                    (match and (match.files() or match.anypats()))):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))

                if files:
                    modified, removed = [], []
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            modified.append(f)
                        elif s == 'r':
                            removed.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                    changes = [modified, [], removed, [], []]
                else:
                    changes = self.status(match=match)
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)
                changes = [files, [], [], [], []]

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))
            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            return self._commitctx(wctx, force, force_editor, empty_ok,
                                   use_dirstate, update_dirstate)
        finally:
            del lock, wlock

    def commitctx(self, ctx):
        """Add a new revision to current repository.

        Revision information is passed in the context.memctx argument.
        commitctx() does not touch the working directory.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            return self._commitctx(ctx, force=True, force_editor=False,
                                   empty_ok=True, use_dirstate=False,
                                   update_dirstate=False)
        finally:
            del lock, wlock

    def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
                   use_dirstate=True, update_dirstate=True):
        tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = util.sort(wctx.modified() + wctx.added())
            remove = wctx.removed()
            extra = wctx.extra().copy()
            branchname = extra['branch']
            user = wctx.user()
            text = wctx.description()

            p1, p2 = [p.node() for p in wctx.parents()]
            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    fctx = wctx.filectx(f)
                    newflags = fctx.flags()
                    new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        if m1.flags(f) != newflags:
                            changed.append(f)
                    m1.set(f, newflags)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            updated, added = [], []
            for f in util.sort(changed):
                if f in m1 or f in m2:
                    updated.append(f)
                else:
                    added.append(f)

            # update manifest
            m1.update(new)
            removed = []

            for f in util.sort(remove):
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append("") # Empty line between message and comments.
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: added %s" % f for f in added])
                edittext.extend(["HG: changed %s" % f for f in updated])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not added and not updated and not removed:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2 == self[None]
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except lock.LockException:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
1044 modified, added, clean = [], [], []
1045 for fn in mf2:
1045 for fn in mf2:
1046 if fn in mf1:
1046 if fn in mf1:
1047 if (mf1.flags(fn) != mf2.flags(fn) or
1047 if (mf1.flags(fn) != mf2.flags(fn) or
1048 (mf1[fn] != mf2[fn] and
1048 (mf1[fn] != mf2[fn] and
1049 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1049 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1050 modified.append(fn)
1050 modified.append(fn)
1051 elif listclean:
1051 elif listclean:
1052 clean.append(fn)
1052 clean.append(fn)
1053 del mf1[fn]
1053 del mf1[fn]
1054 else:
1054 else:
1055 added.append(fn)
1055 added.append(fn)
1056 removed = mf1.keys()
1056 removed = mf1.keys()
1057
1057
1058 r = modified, added, removed, deleted, unknown, ignored, clean
1058 r = modified, added, removed, deleted, unknown, ignored, clean
1059 [l.sort() for l in r]
1059 [l.sort() for l in r]
1060 return r
1060 return r
1061
1061
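    # Usage sketch (editorial note, not part of the original source): status()
    # returns a 7-tuple of sorted filename lists in a fixed order. A caller
    # comparing the working directory against its parent might unpack it as:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)
    #   for f in modified:
    #       repo.ui.write("M %s\n" % f)
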
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in util.sort(heads)]

    def branchheads(self, branch=None, start=None):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch, ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self[rev].branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

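    # Usage sketch (editorial note, not part of the original source): to list
    # the heads of the 'default' branch that descend from a given start node,
    # assuming `repo` is a localrepository and `startnode` a changelog node:
    #
    #   for h in repo.branchheads('default', start=startnode):
    #       repo.ui.write("%s\n" % short(h))
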
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

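    # Worked example (editorial note, not part of the original source): for a
    # (top, bottom) pair spanning a linear run of revisions r10 (top) down to
    # r0 (bottom), the loop above collects the nodes at distances 1, 2, 4 and
    # 8 from top, i.e. r9, r8, r6 and r2. findincoming() below feeds these
    # exponentially spaced samples to its binary search over the wire:
    #
    #   samples = repo.between([(top, bottom)])[0]
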
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        both self and remote but none of whose children exist in both self
        and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        search = [(t, b) for (t, b, p1, p2) in search]
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch[p] = 1
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

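    # Usage sketch (editorial note, not part of the original source): pull()
    # and prepush() below drive this method roughly as follows:
    #
    #   base = {}                # filled in with nodes common to both sides
    #   fetch = repo.findincoming(remote, base=base)
    #   # fetch: roots of the subsets missing locally; base: common nodes
    #   # none of whose children exist on both sides
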
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

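    # Usage sketch (editorial note, not part of the original source): prepush()
    # below uses the two-element return form to learn which remote heads will
    # acquire new children if the push goes through:
    #
    #   base = {}
    #   repo.findincoming(remote, base, remote_heads, force=force)
    #   update, updated_heads = repo.findoutgoing(remote, base, remote_heads)
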
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

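    # Worked example (editorial note, not part of the original source): if the
    # remote has the single head A and we push a changeset B that does not
    # descend from A, the loop above keeps A in newheads (no pushed head
    # descends from it) and B is already there, so len(newheads) == 2 >
    # len(remote_heads) == 1 and the push aborts unless forced.
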
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

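    # Editorial note (sketch, not part of the original source): the race guard
    # lives on the server side. unbundle() ships the remote heads we observed
    # during discovery along with the bundle; the server compares that list
    # with its current heads and rejects the push if they differ, unless the
    # literal sentinel ['force'] was sent instead:
    #
    #   remote.unbundle(cg, remote_heads, 'push')   # server re-checks heads
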
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """
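        # Example (editorial note, not part of the original source): an
        # extranodes dict that forces one extra manifest node and one extra
        # filenode into the group could look like this (the node and linknode
        # values below are hypothetical placeholders):
        #
        #   extranodes = {
        #       1: [(mnode, clnode)],               # key 1 selects the manifest
        #       'foo/bar.txt': [(fnode, clnode)],   # per-file wanted nodes
        #   }
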
        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
1783 def gengroup():
1797 def gengroup():
1784 # The set of changed files starts empty.
1798 # The set of changed files starts empty.
1785 changedfiles = {}
1799 changedfiles = {}
1786 # Create a changenode group generator that will call our functions
1800 # Create a changenode group generator that will call our functions
1787 # back to lookup the owning changenode and collect information.
1801 # back to lookup the owning changenode and collect information.
1788 group = cl.group(msng_cl_lst, identity,
1802 group = cl.group(msng_cl_lst, identity,
1789 manifest_and_file_collector(changedfiles))
1803 manifest_and_file_collector(changedfiles))
1790 for chnk in group:
1804 for chnk in group:
1791 yield chnk
1805 yield chnk
1792
1806
1793 # The list of manifests has been collected by the generator
1807 # The list of manifests has been collected by the generator
1794 # calling our functions back.
1808 # calling our functions back.
1795 prune_manifests()
1809 prune_manifests()
1796 add_extra_nodes(1, msng_mnfst_set)
1810 add_extra_nodes(1, msng_mnfst_set)
1797 msng_mnfst_lst = msng_mnfst_set.keys()
1811 msng_mnfst_lst = msng_mnfst_set.keys()
1798 # Sort the manifestnodes by revision number.
1812 # Sort the manifestnodes by revision number.
1799 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1813 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1800 # Create a generator for the manifestnodes that calls our lookup
1814 # Create a generator for the manifestnodes that calls our lookup
1801 # and data collection functions back.
1815 # and data collection functions back.
1802 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1816 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1803 filenode_collector(changedfiles))
1817 filenode_collector(changedfiles))
1804 for chnk in group:
1818 for chnk in group:
1805 yield chnk
1819 yield chnk
1806
1820
1807 # These are no longer needed, dereference and toss the memory for
1821 # These are no longer needed, dereference and toss the memory for
1808 # them.
1822 # them.
1809 msng_mnfst_lst = None
1823 msng_mnfst_lst = None
1810 msng_mnfst_set.clear()
1824 msng_mnfst_set.clear()
1811
1825
1812 if extranodes:
1826 if extranodes:
1813 for fname in extranodes:
1827 for fname in extranodes:
1814 if isinstance(fname, int):
1828 if isinstance(fname, int):
1815 continue
1829 continue
1816 msng_filenode_set.setdefault(fname, {})
1830 msng_filenode_set.setdefault(fname, {})
1817 changedfiles[fname] = 1
1831 changedfiles[fname] = 1
1818 # Go through all our files in order sorted by name.
1832 # Go through all our files in order sorted by name.
1819 for fname in util.sort(changedfiles):
1833 for fname in util.sort(changedfiles):
1820 filerevlog = self.file(fname)
1834 filerevlog = self.file(fname)
1821 if not len(filerevlog):
1835 if not len(filerevlog):
1822 raise util.Abort(_("empty or missing revlog for %s") % fname)
1836 raise util.Abort(_("empty or missing revlog for %s") % fname)
1823 # Toss out the filenodes that the recipient isn't really
1837 # Toss out the filenodes that the recipient isn't really
1824 # missing.
1838 # missing.
1825 if fname in msng_filenode_set:
1839 if fname in msng_filenode_set:
1826 prune_filenodes(fname, filerevlog)
1840 prune_filenodes(fname, filerevlog)
1827 add_extra_nodes(fname, msng_filenode_set[fname])
1841 add_extra_nodes(fname, msng_filenode_set[fname])
1828 msng_filenode_lst = msng_filenode_set[fname].keys()
1842 msng_filenode_lst = msng_filenode_set[fname].keys()
1829 else:
1843 else:
1830 msng_filenode_lst = []
1844 msng_filenode_lst = []
1831 # If any filenodes are left, generate the group for them,
1845 # If any filenodes are left, generate the group for them,
1832 # otherwise don't bother.
1846 # otherwise don't bother.
1833 if len(msng_filenode_lst) > 0:
1847 if len(msng_filenode_lst) > 0:
1834 yield changegroup.chunkheader(len(fname))
1848 yield changegroup.chunkheader(len(fname))
1835 yield fname
1849 yield fname
1836 # Sort the filenodes by their revision #
1850 # Sort the filenodes by their revision #
1837 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1851 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1838 # Create a group generator and only pass in a changenode
1852 # Create a group generator and only pass in a changenode
1839 # lookup function since we don't need to collect any information
1853 # lookup function since we don't need to collect any information
1840 # from filenodes.
1854 # from filenodes.
1841 group = filerevlog.group(msng_filenode_lst,
1855 group = filerevlog.group(msng_filenode_lst,
1842 lookup_filenode_link_func(fname))
1856 lookup_filenode_link_func(fname))
1843 for chnk in group:
1857 for chnk in group:
1844 yield chnk
1858 yield chnk
1845 if fname in msng_filenode_set:
1859 if fname in msng_filenode_set:
1846 # Don't need this anymore, toss it to free memory.
1860 # Don't need this anymore, toss it to free memory.
1847 del msng_filenode_set[fname]
1861 del msng_filenode_set[fname]
1848 # Signal that no more groups are left.
1862 # Signal that no more groups are left.
1849 yield changegroup.closechunk()
1863 yield changegroup.closechunk()
1850
1864
1851 if msng_cl_lst:
1865 if msng_cl_lst:
1852 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1866 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1853
1867
1854 return util.chunkbuffer(gengroup())
1868 return util.chunkbuffer(gengroup())
1855
1869
1856 def changegroup(self, basenodes, source):
1870 def changegroup(self, basenodes, source):
1871 # to avoid a race we use changegroupsubset() (issue1320)
1872 return self.changegroupsubset(basenodes, self.heads(), source)
1873
1874 def _changegroup(self, common, source):
1857 """Generate a changegroup of all nodes that we have that a recipient
1875 """Generate a changegroup of all nodes that we have that a recipient
1858 doesn't.
1876 doesn't.
1859
1877
1860 This is much easier than the previous function as we can assume that
1878 This is much easier than the previous function as we can assume that
1861 the recipient has any changenode we aren't sending them."""
1879 the recipient has any changenode we aren't sending them.
1880
1881 common is the set of common nodes between remote and self"""
1862
1882
1863 self.hook('preoutgoing', throw=True, source=source)
1883 self.hook('preoutgoing', throw=True, source=source)
1864
1884
1865 cl = self.changelog
1885 cl = self.changelog
1866 nodes = cl.nodesbetween(basenodes, None)[0]
1886 nodes = cl.findmissing(common)
1867 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1887 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1868 self.changegroupinfo(nodes, source)
1888 self.changegroupinfo(nodes, source)
1869
1889
1870 def identity(x):
1890 def identity(x):
1871 return x
1891 return x
1872
1892
1873 def gennodelst(log):
1893 def gennodelst(log):
1874 for r in log:
1894 for r in log:
1875 n = log.node(r)
1895 n = log.node(r)
1876 if log.linkrev(n) in revset:
1896 if log.linkrev(n) in revset:
1877 yield n
1897 yield n
1878
1898
1879 def changed_file_collector(changedfileset):
1899 def changed_file_collector(changedfileset):
1880 def collect_changed_files(clnode):
1900 def collect_changed_files(clnode):
1881 c = cl.read(clnode)
1901 c = cl.read(clnode)
1882 for fname in c[3]:
1902 for fname in c[3]:
1883 changedfileset[fname] = 1
1903 changedfileset[fname] = 1
1884 return collect_changed_files
1904 return collect_changed_files
1885
1905
1886 def lookuprevlink_func(revlog):
1906 def lookuprevlink_func(revlog):
1887 def lookuprevlink(n):
1907 def lookuprevlink(n):
1888 return cl.node(revlog.linkrev(n))
1908 return cl.node(revlog.linkrev(n))
1889 return lookuprevlink
1909 return lookuprevlink
1890
1910
1891 def gengroup():
1911 def gengroup():
1892 # construct a list of all changed files
1912 # construct a list of all changed files
1893 changedfiles = {}
1913 changedfiles = {}
1894
1914
1895 for chnk in cl.group(nodes, identity,
1915 for chnk in cl.group(nodes, identity,
1896 changed_file_collector(changedfiles)):
1916 changed_file_collector(changedfiles)):
1897 yield chnk
1917 yield chnk
1898
1918
1899 mnfst = self.manifest
1919 mnfst = self.manifest
1900 nodeiter = gennodelst(mnfst)
1920 nodeiter = gennodelst(mnfst)
1901 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1921 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1902 yield chnk
1922 yield chnk
1903
1923
1904 for fname in util.sort(changedfiles):
1924 for fname in util.sort(changedfiles):
1905 filerevlog = self.file(fname)
1925 filerevlog = self.file(fname)
1906 if not len(filerevlog):
1926 if not len(filerevlog):
1907 raise util.Abort(_("empty or missing revlog for %s") % fname)
1927 raise util.Abort(_("empty or missing revlog for %s") % fname)
1908 nodeiter = gennodelst(filerevlog)
1928 nodeiter = gennodelst(filerevlog)
1909 nodeiter = list(nodeiter)
1929 nodeiter = list(nodeiter)
1910 if nodeiter:
1930 if nodeiter:
1911 yield changegroup.chunkheader(len(fname))
1931 yield changegroup.chunkheader(len(fname))
1912 yield fname
1932 yield fname
1913 lookup = lookuprevlink_func(filerevlog)
1933 lookup = lookuprevlink_func(filerevlog)
1914 for chnk in filerevlog.group(nodeiter, lookup):
1934 for chnk in filerevlog.group(nodeiter, lookup):
1915 yield chnk
1935 yield chnk
1916
1936
1917 yield changegroup.closechunk()
1937 yield changegroup.closechunk()
1918
1938
1919 if nodes:
1939 if nodes:
1920 self.hook('outgoing', node=hex(nodes[0]), source=source)
1940 self.hook('outgoing', node=hex(nodes[0]), source=source)
1921
1941
1922 return util.chunkbuffer(gengroup())
1942 return util.chunkbuffer(gengroup())
1923
1943
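# A minimal sketch of why delegating changegroup() to
# changegroupsubset() closes the issue1320 race: the heads are
# snapshotted once, up front, so a commit or push that lands while the
# bundle is being generated cannot enlarge the outgoing set. The names
# repo/bases/source below are illustrative, not part of this change.
def _pull_snapshot_sketch(repo, bases, source='pull'):
    heads = repo.heads()  # pin the upper bound of the bundle here
    # everything reachable from these heads but not from bases is
    # bundled; later commits hang off heads outside the snapshot
    return repo.changegroupsubset(bases, heads, source)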
1924 def addchangegroup(self, source, srctype, url, emptyok=False):
1944 def addchangegroup(self, source, srctype, url, emptyok=False):
1925 """add changegroup to repo.
1945 """add changegroup to repo.
1926
1946
1927 return values:
1947 return values:
1928 - nothing changed or no source: 0
1948 - nothing changed or no source: 0
1929 - more heads than before: 1+added heads (2..n)
1949 - more heads than before: 1+added heads (2..n)
1930 - fewer heads than before: -1-removed heads (-2..-n)
1950 - fewer heads than before: -1-removed heads (-2..-n)
1931 - number of heads stays the same: 1
1951 - number of heads stays the same: 1
1932 """
1952 """
1933 def csmap(x):
1953 def csmap(x):
1934 self.ui.debug(_("add changeset %s\n") % short(x))
1954 self.ui.debug(_("add changeset %s\n") % short(x))
1935 return len(cl)
1955 return len(cl)
1936
1956
1937 def revmap(x):
1957 def revmap(x):
1938 return cl.rev(x)
1958 return cl.rev(x)
1939
1959
1940 if not source:
1960 if not source:
1941 return 0
1961 return 0
1942
1962
1943 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1963 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1944
1964
1945 changesets = files = revisions = 0
1965 changesets = files = revisions = 0
1946
1966
1947 # write changelog data to temp files so concurrent readers will not see
1967 # write changelog data to temp files so concurrent readers will not see
1948 # an inconsistent view
1968 # an inconsistent view
1949 cl = self.changelog
1969 cl = self.changelog
1950 cl.delayupdate()
1970 cl.delayupdate()
1951 oldheads = len(cl.heads())
1971 oldheads = len(cl.heads())
1952
1972
1953 tr = self.transaction()
1973 tr = self.transaction()
1954 try:
1974 try:
1955 trp = weakref.proxy(tr)
1975 trp = weakref.proxy(tr)
1956 # pull off the changeset group
1976 # pull off the changeset group
1957 self.ui.status(_("adding changesets\n"))
1977 self.ui.status(_("adding changesets\n"))
1958 cor = len(cl) - 1
1978 cor = len(cl) - 1
1959 chunkiter = changegroup.chunkiter(source)
1979 chunkiter = changegroup.chunkiter(source)
1960 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1980 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1961 raise util.Abort(_("received changelog group is empty"))
1981 raise util.Abort(_("received changelog group is empty"))
1962 cnr = len(cl) - 1
1982 cnr = len(cl) - 1
1963 changesets = cnr - cor
1983 changesets = cnr - cor
1964
1984
1965 # pull off the manifest group
1985 # pull off the manifest group
1966 self.ui.status(_("adding manifests\n"))
1986 self.ui.status(_("adding manifests\n"))
1967 chunkiter = changegroup.chunkiter(source)
1987 chunkiter = changegroup.chunkiter(source)
1968 # no need to check for empty manifest group here:
1988 # no need to check for empty manifest group here:
1969 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1989 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1970 # no new manifest will be created and the manifest group will
1990 # no new manifest will be created and the manifest group will
1971 # be empty during the pull
1991 # be empty during the pull
1972 self.manifest.addgroup(chunkiter, revmap, trp)
1992 self.manifest.addgroup(chunkiter, revmap, trp)
1973
1993
1974 # process the files
1994 # process the files
1975 self.ui.status(_("adding file changes\n"))
1995 self.ui.status(_("adding file changes\n"))
1976 while 1:
1996 while 1:
1977 f = changegroup.getchunk(source)
1997 f = changegroup.getchunk(source)
1978 if not f:
1998 if not f:
1979 break
1999 break
1980 self.ui.debug(_("adding %s revisions\n") % f)
2000 self.ui.debug(_("adding %s revisions\n") % f)
1981 fl = self.file(f)
2001 fl = self.file(f)
1982 o = len(fl)
2002 o = len(fl)
1983 chunkiter = changegroup.chunkiter(source)
2003 chunkiter = changegroup.chunkiter(source)
1984 if fl.addgroup(chunkiter, revmap, trp) is None:
2004 if fl.addgroup(chunkiter, revmap, trp) is None:
1985 raise util.Abort(_("received file revlog group is empty"))
2005 raise util.Abort(_("received file revlog group is empty"))
1986 revisions += len(fl) - o
2006 revisions += len(fl) - o
1987 files += 1
2007 files += 1
1988
2008
1989 # make changelog see real files again
2009 # make changelog see real files again
1990 cl.finalize(trp)
2010 cl.finalize(trp)
1991
2011
1992 newheads = len(self.changelog.heads())
2012 newheads = len(self.changelog.heads())
1993 heads = ""
2013 heads = ""
1994 if oldheads and newheads != oldheads:
2014 if oldheads and newheads != oldheads:
1995 heads = _(" (%+d heads)") % (newheads - oldheads)
2015 heads = _(" (%+d heads)") % (newheads - oldheads)
1996
2016
1997 self.ui.status(_("added %d changesets"
2017 self.ui.status(_("added %d changesets"
1998 " with %d changes to %d files%s\n")
2018 " with %d changes to %d files%s\n")
1999 % (changesets, revisions, files, heads))
2019 % (changesets, revisions, files, heads))
2000
2020
2001 if changesets > 0:
2021 if changesets > 0:
2002 self.hook('pretxnchangegroup', throw=True,
2022 self.hook('pretxnchangegroup', throw=True,
2003 node=hex(self.changelog.node(cor+1)), source=srctype,
2023 node=hex(self.changelog.node(cor+1)), source=srctype,
2004 url=url)
2024 url=url)
2005
2025
2006 tr.close()
2026 tr.close()
2007 finally:
2027 finally:
2008 del tr
2028 del tr
2009
2029
2010 if changesets > 0:
2030 if changesets > 0:
2011 # forcefully update the on-disk branch cache
2031 # forcefully update the on-disk branch cache
2012 self.ui.debug(_("updating the branch cache\n"))
2032 self.ui.debug(_("updating the branch cache\n"))
2013 self.branchtags()
2033 self.branchtags()
2014 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2034 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2015 source=srctype, url=url)
2035 source=srctype, url=url)
2016
2036
2017 for i in xrange(cor + 1, cnr + 1):
2037 for i in xrange(cor + 1, cnr + 1):
2018 self.hook("incoming", node=hex(self.changelog.node(i)),
2038 self.hook("incoming", node=hex(self.changelog.node(i)),
2019 source=srctype, url=url)
2039 source=srctype, url=url)
2020
2040
2021 # never return 0 here:
2041 # never return 0 here:
2022 if newheads < oldheads:
2042 if newheads < oldheads:
2023 return newheads - oldheads - 1
2043 return newheads - oldheads - 1
2024 else:
2044 else:
2025 return newheads - oldheads + 1
2045 return newheads - oldheads + 1
2026
2046
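# A small helper decoding addchangegroup()'s return value per the
# docstring above; purely illustrative, not part of the module.
def _describe_addchangegroup(ret):
    if ret == 0:
        return 'nothing changed or no source'
    if ret > 1:
        return '%d head(s) added' % (ret - 1)    # ret = 1 + added
    if ret < 0:
        return '%d head(s) removed' % (-1 - ret) # ret = -1 - removed
    return 'head count unchanged'                # ret == 1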
2027
2047
2028 def stream_in(self, remote):
2048 def stream_in(self, remote):
2029 fp = remote.stream_out()
2049 fp = remote.stream_out()
2030 l = fp.readline()
2050 l = fp.readline()
2031 try:
2051 try:
2032 resp = int(l)
2052 resp = int(l)
2033 except ValueError:
2053 except ValueError:
2034 raise util.UnexpectedOutput(
2054 raise util.UnexpectedOutput(
2035 _('Unexpected response from remote server:'), l)
2055 _('Unexpected response from remote server:'), l)
2036 if resp == 1:
2056 if resp == 1:
2037 raise util.Abort(_('operation forbidden by server'))
2057 raise util.Abort(_('operation forbidden by server'))
2038 elif resp == 2:
2058 elif resp == 2:
2039 raise util.Abort(_('locking the remote repository failed'))
2059 raise util.Abort(_('locking the remote repository failed'))
2040 elif resp != 0:
2060 elif resp != 0:
2041 raise util.Abort(_('the server sent an unknown error code'))
2061 raise util.Abort(_('the server sent an unknown error code'))
2042 self.ui.status(_('streaming all changes\n'))
2062 self.ui.status(_('streaming all changes\n'))
2043 l = fp.readline()
2063 l = fp.readline()
2044 try:
2064 try:
2045 total_files, total_bytes = map(int, l.split(' ', 1))
2065 total_files, total_bytes = map(int, l.split(' ', 1))
2046 except (ValueError, TypeError):
2066 except (ValueError, TypeError):
2047 raise util.UnexpectedOutput(
2067 raise util.UnexpectedOutput(
2048 _('Unexpected response from remote server:'), l)
2068 _('Unexpected response from remote server:'), l)
2049 self.ui.status(_('%d files to transfer, %s of data\n') %
2069 self.ui.status(_('%d files to transfer, %s of data\n') %
2050 (total_files, util.bytecount(total_bytes)))
2070 (total_files, util.bytecount(total_bytes)))
2051 start = time.time()
2071 start = time.time()
2052 for i in xrange(total_files):
2072 for i in xrange(total_files):
2053 # XXX doesn't support '\n' or '\r' in filenames
2073 # XXX doesn't support '\n' or '\r' in filenames
2054 l = fp.readline()
2074 l = fp.readline()
2055 try:
2075 try:
2056 name, size = l.split('\0', 1)
2076 name, size = l.split('\0', 1)
2057 size = int(size)
2077 size = int(size)
2058 except (ValueError, TypeError):
2078 except (ValueError, TypeError):
2059 raise util.UnexpectedOutput(
2079 raise util.UnexpectedOutput(
2060 _('Unexpected response from remote server:'), l)
2080 _('Unexpected response from remote server:'), l)
2061 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2081 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2062 ofp = self.sopener(name, 'w')
2082 ofp = self.sopener(name, 'w')
2063 for chunk in util.filechunkiter(fp, limit=size):
2083 for chunk in util.filechunkiter(fp, limit=size):
2064 ofp.write(chunk)
2084 ofp.write(chunk)
2065 ofp.close()
2085 ofp.close()
2066 elapsed = time.time() - start
2086 elapsed = time.time() - start
2067 if elapsed <= 0:
2087 if elapsed <= 0:
2068 elapsed = 0.001
2088 elapsed = 0.001
2069 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2089 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2070 (util.bytecount(total_bytes), elapsed,
2090 (util.bytecount(total_bytes), elapsed,
2071 util.bytecount(total_bytes / elapsed)))
2091 util.bytecount(total_bytes / elapsed)))
2072 self.invalidate()
2092 self.invalidate()
2073 return len(self.heads()) + 1
2093 return len(self.heads()) + 1
2074
2094
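# For reference, the framing stream_in() parses above is, from the
# reader's side (a sketch; fp stands for any file-like object):
#   <status>\n                  "0" ok, "1" forbidden, "2" lock failed
#   <total_files> <total_bytes>\n
#   then per file: <name>\0<size>\n followed by <size> raw bytes
def _read_stream_header(fp):
    status = int(fp.readline())
    if status != 0:
        raise ValueError('server refused stream clone: %d' % status)
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    return total_files, total_bytes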
2075 def clone(self, remote, heads=[], stream=False):
2095 def clone(self, remote, heads=[], stream=False):
2076 '''clone remote repository.
2096 '''clone remote repository.
2077
2097
2078 keyword arguments:
2098 keyword arguments:
2079 heads: list of revs to clone (forces use of pull)
2099 heads: list of revs to clone (forces use of pull)
2080 stream: use streaming clone if possible'''
2100 stream: use streaming clone if possible'''
2081
2101
2082 # now, all clients that can request uncompressed clones can
2102 # now, all clients that can request uncompressed clones can
2083 # read repo formats supported by all servers that can serve
2103 # read repo formats supported by all servers that can serve
2084 # them.
2104 # them.
2085
2105
2086 # if revlog format changes, client will have to check version
2106 # if revlog format changes, client will have to check version
2087 # and format flags on "stream" capability, and use
2107 # and format flags on "stream" capability, and use
2088 # uncompressed only if compatible.
2108 # uncompressed only if compatible.
2089
2109
2090 if stream and not heads and remote.capable('stream'):
2110 if stream and not heads and remote.capable('stream'):
2091 return self.stream_in(remote)
2111 return self.stream_in(remote)
2092 return self.pull(remote, heads)
2112 return self.pull(remote, heads)
2093
2113
2094 # used to avoid circular references so destructors work
2114 # used to avoid circular references so destructors work
2095 def aftertrans(files):
2115 def aftertrans(files):
2096 renamefiles = [tuple(t) for t in files]
2116 renamefiles = [tuple(t) for t in files]
2097 def a():
2117 def a():
2098 for src, dest in renamefiles:
2118 for src, dest in renamefiles:
2099 util.rename(src, dest)
2119 util.rename(src, dest)
2100 return a
2120 return a
2101
2121
2102 def instance(ui, path, create):
2122 def instance(ui, path, create):
2103 return localrepository(ui, util.drop_scheme('file', path), create)
2123 return localrepository(ui, util.drop_scheme('file', path), create)
2104
2124
2105 def islocal(path):
2125 def islocal(path):
2106 return True
2126 return True
@@ -1,1334 +1,1374 b''
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import bin, hex, nullid, nullrev, short
13 from node import bin, hex, nullid, nullrev, short
14 from i18n import _
14 from i18n import _
15 import changegroup, errno, ancestor, mdiff, parsers
15 import changegroup, errno, ancestor, mdiff, parsers
16 import struct, util, zlib
16 import struct, util, zlib
17
17
18 _pack = struct.pack
18 _pack = struct.pack
19 _unpack = struct.unpack
19 _unpack = struct.unpack
20 _compress = zlib.compress
20 _compress = zlib.compress
21 _decompress = zlib.decompress
21 _decompress = zlib.decompress
22 _sha = util.sha1
22 _sha = util.sha1
23
23
24 # revlog flags
24 # revlog flags
25 REVLOGV0 = 0
25 REVLOGV0 = 0
26 REVLOGNG = 1
26 REVLOGNG = 1
27 REVLOGNGINLINEDATA = (1 << 16)
27 REVLOGNGINLINEDATA = (1 << 16)
28 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
28 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
29 REVLOG_DEFAULT_FORMAT = REVLOGNG
29 REVLOG_DEFAULT_FORMAT = REVLOGNG
30 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
30 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
31
31
32 class RevlogError(Exception):
32 class RevlogError(Exception):
33 pass
33 pass
34
34
35 class LookupError(RevlogError, KeyError):
35 class LookupError(RevlogError, KeyError):
36 def __init__(self, name, index, message):
36 def __init__(self, name, index, message):
37 self.name = name
37 self.name = name
38 if isinstance(name, str) and len(name) == 20:
38 if isinstance(name, str) and len(name) == 20:
39 name = short(name)
39 name = short(name)
40 RevlogError.__init__(self, _('%s@%s: %s') % (index, name, message))
40 RevlogError.__init__(self, _('%s@%s: %s') % (index, name, message))
41
41
42 def __str__(self):
42 def __str__(self):
43 return RevlogError.__str__(self)
43 return RevlogError.__str__(self)
44
44
45 def getoffset(q):
45 def getoffset(q):
46 return int(q >> 16)
46 return int(q >> 16)
47
47
48 def gettype(q):
48 def gettype(q):
49 return int(q & 0xFFFF)
49 return int(q & 0xFFFF)
50
50
51 def offset_type(offset, type):
51 def offset_type(offset, type):
52 return long(long(offset) << 16 | type)
52 return long(long(offset) << 16 | type)
53
53
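# A quick round trip through the helpers above: the offset and the
# 16-bit type flags share one 64-bit integer, offset in the high bits.
def _offset_type_sketch():
    q = offset_type(4096, 1)
    assert getoffset(q) == 4096
    assert gettype(q) == 1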
54 def hash(text, p1, p2):
54 def hash(text, p1, p2):
55 """generate a hash from the given text and its parent hashes
55 """generate a hash from the given text and its parent hashes
56
56
57 This hash combines both the current file contents and its history
57 This hash combines both the current file contents and its history
58 in a manner that makes it easy to distinguish nodes with the same
58 in a manner that makes it easy to distinguish nodes with the same
59 content in the revision graph.
59 content in the revision graph.
60 """
60 """
61 l = [p1, p2]
61 l = [p1, p2]
62 l.sort()
62 l.sort()
63 s = _sha(l[0])
63 s = _sha(l[0])
64 s.update(l[1])
64 s.update(l[1])
65 s.update(text)
65 s.update(text)
66 return s.digest()
66 return s.digest()
67
67
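# hash() sorts the parents first, so it is symmetric in p1 and p2, and
# a root revision (both parents null) hashes as
# sha1(nullid + nullid + text). A minimal sketch:
def _hash_sketch():
    root = hash("hello\n", nullid, nullid)
    assert hash("x", nullid, root) == hash("x", root, nullid)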
68 def compress(text):
68 def compress(text):
69 """ generate a possibly-compressed representation of text """
69 """ generate a possibly-compressed representation of text """
70 if not text:
70 if not text:
71 return ("", text)
71 return ("", text)
72 l = len(text)
72 l = len(text)
73 bin = None
73 bin = None
74 if l < 44:
74 if l < 44:
75 pass
75 pass
76 elif l > 1000000:
76 elif l > 1000000:
77 # zlib makes an internal copy, thus doubling memory usage for
77 # zlib makes an internal copy, thus doubling memory usage for
78 # large files, so let's do this in pieces
78 # large files, so let's do this in pieces
79 z = zlib.compressobj()
79 z = zlib.compressobj()
80 p = []
80 p = []
81 pos = 0
81 pos = 0
82 while pos < l:
82 while pos < l:
83 pos2 = pos + 2**20
83 pos2 = pos + 2**20
84 p.append(z.compress(text[pos:pos2]))
84 p.append(z.compress(text[pos:pos2]))
85 pos = pos2
85 pos = pos2
86 p.append(z.flush())
86 p.append(z.flush())
87 if sum(map(len, p)) < l:
87 if sum(map(len, p)) < l:
88 bin = "".join(p)
88 bin = "".join(p)
89 else:
89 else:
90 bin = _compress(text)
90 bin = _compress(text)
91 if bin is None or len(bin) > l:
91 if bin is None or len(bin) > l:
92 if text[0] == '\0':
92 if text[0] == '\0':
93 return ("", text)
93 return ("", text)
94 return ('u', text)
94 return ('u', text)
95 return ("", bin)
95 return ("", bin)
96
96
97 def decompress(bin):
97 def decompress(bin):
98 """ decompress the given input """
98 """ decompress the given input """
99 if not bin:
99 if not bin:
100 return bin
100 return bin
101 t = bin[0]
101 t = bin[0]
102 if t == '\0':
102 if t == '\0':
103 return bin
103 return bin
104 if t == 'x':
104 if t == 'x':
105 return _decompress(bin)
105 return _decompress(bin)
106 if t == 'u':
106 if t == 'u':
107 return bin[1:]
107 return bin[1:]
108 raise RevlogError(_("unknown compression type %r") % t)
108 raise RevlogError(_("unknown compression type %r") % t)
109
109
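# The first byte of the stored blob acts as an envelope tag for
# decompress(): '\0' for text kept plain (empty, or starting with NUL),
# 'x' for a zlib stream, 'u' for text stored uncompressed because zlib
# did not pay off. A round-trip sketch:
def _compress_roundtrip_sketch():
    for text in ("", "short", "x" * 10000):
        prefix, data = compress(text)
        assert decompress(prefix + data) == text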
110 class lazyparser(object):
110 class lazyparser(object):
111 """
111 """
112 this class avoids the need to parse the entirety of large indices
112 this class avoids the need to parse the entirety of large indices
113 """
113 """
114
114
115 # lazyparser is not safe to use on windows if win32 extensions not
115 # lazyparser is not safe to use on windows if win32 extensions not
116 # available. it keeps the file handle open, which makes it impossible
116 # available. it keeps the file handle open, which makes it impossible
117 # to break hardlinks on local cloned repos.
117 # to break hardlinks on local cloned repos.
118
118
119 def __init__(self, dataf, size):
119 def __init__(self, dataf, size):
120 self.dataf = dataf
120 self.dataf = dataf
121 self.s = struct.calcsize(indexformatng)
121 self.s = struct.calcsize(indexformatng)
122 self.datasize = size
122 self.datasize = size
123 self.l = size/self.s
123 self.l = size/self.s
124 self.index = [None] * self.l
124 self.index = [None] * self.l
125 self.map = {nullid: nullrev}
125 self.map = {nullid: nullrev}
126 self.allmap = 0
126 self.allmap = 0
127 self.all = 0
127 self.all = 0
128 self.mapfind_count = 0
128 self.mapfind_count = 0
129
129
130 def loadmap(self):
130 def loadmap(self):
131 """
131 """
132 during a commit, we need to make sure the rev being added is
132 during a commit, we need to make sure the rev being added is
133 not a duplicate. This requires loading the entire index,
133 not a duplicate. This requires loading the entire index,
134 which is fairly slow. loadmap can load up just the node map,
134 which is fairly slow. loadmap can load up just the node map,
135 which takes much less time.
135 which takes much less time.
136 """
136 """
137 if self.allmap:
137 if self.allmap:
138 return
138 return
139 end = self.datasize
139 end = self.datasize
140 self.allmap = 1
140 self.allmap = 1
141 cur = 0
141 cur = 0
142 count = 0
142 count = 0
143 blocksize = self.s * 256
143 blocksize = self.s * 256
144 self.dataf.seek(0)
144 self.dataf.seek(0)
145 while cur < end:
145 while cur < end:
146 data = self.dataf.read(blocksize)
146 data = self.dataf.read(blocksize)
147 off = 0
147 off = 0
148 for x in xrange(256):
148 for x in xrange(256):
149 n = data[off + ngshaoffset:off + ngshaoffset + 20]
149 n = data[off + ngshaoffset:off + ngshaoffset + 20]
150 self.map[n] = count
150 self.map[n] = count
151 count += 1
151 count += 1
152 if count >= self.l:
152 if count >= self.l:
153 break
153 break
154 off += self.s
154 off += self.s
155 cur += blocksize
155 cur += blocksize
156
156
157 def loadblock(self, blockstart, blocksize, data=None):
157 def loadblock(self, blockstart, blocksize, data=None):
158 if self.all:
158 if self.all:
159 return
159 return
160 if data is None:
160 if data is None:
161 self.dataf.seek(blockstart)
161 self.dataf.seek(blockstart)
162 if blockstart + blocksize > self.datasize:
162 if blockstart + blocksize > self.datasize:
163 # the revlog may have grown since we've started running,
163 # the revlog may have grown since we've started running,
164 # but we don't have space in self.index for more entries.
164 # but we don't have space in self.index for more entries.
165 # limit blocksize so that we don't get too much data.
165 # limit blocksize so that we don't get too much data.
166 blocksize = max(self.datasize - blockstart, 0)
166 blocksize = max(self.datasize - blockstart, 0)
167 data = self.dataf.read(blocksize)
167 data = self.dataf.read(blocksize)
168 lend = len(data) / self.s
168 lend = len(data) / self.s
169 i = blockstart / self.s
169 i = blockstart / self.s
170 off = 0
170 off = 0
171 # lazyindex supports __delitem__
171 # lazyindex supports __delitem__
172 if lend > len(self.index) - i:
172 if lend > len(self.index) - i:
173 lend = len(self.index) - i
173 lend = len(self.index) - i
174 for x in xrange(lend):
174 for x in xrange(lend):
175 if self.index[i + x] == None:
175 if self.index[i + x] == None:
176 b = data[off : off + self.s]
176 b = data[off : off + self.s]
177 self.index[i + x] = b
177 self.index[i + x] = b
178 n = b[ngshaoffset:ngshaoffset + 20]
178 n = b[ngshaoffset:ngshaoffset + 20]
179 self.map[n] = i + x
179 self.map[n] = i + x
180 off += self.s
180 off += self.s
181
181
182 def findnode(self, node):
182 def findnode(self, node):
183 """search backwards through the index file for a specific node"""
183 """search backwards through the index file for a specific node"""
184 if self.allmap:
184 if self.allmap:
185 return None
185 return None
186
186
187 # hg log will cause many, many searches for the manifest
187 # hg log will cause many, many searches for the manifest
188 # nodes. After we get called a few times, just load the whole
188 # nodes. After we get called a few times, just load the whole
189 # thing.
189 # thing.
190 if self.mapfind_count > 8:
190 if self.mapfind_count > 8:
191 self.loadmap()
191 self.loadmap()
192 if node in self.map:
192 if node in self.map:
193 return node
193 return node
194 return None
194 return None
195 self.mapfind_count += 1
195 self.mapfind_count += 1
196 last = self.l - 1
196 last = self.l - 1
197 while self.index[last] != None:
197 while self.index[last] != None:
198 if last == 0:
198 if last == 0:
199 self.all = 1
199 self.all = 1
200 self.allmap = 1
200 self.allmap = 1
201 return None
201 return None
202 last -= 1
202 last -= 1
203 end = (last + 1) * self.s
203 end = (last + 1) * self.s
204 blocksize = self.s * 256
204 blocksize = self.s * 256
205 while end >= 0:
205 while end >= 0:
206 start = max(end - blocksize, 0)
206 start = max(end - blocksize, 0)
207 self.dataf.seek(start)
207 self.dataf.seek(start)
208 data = self.dataf.read(end - start)
208 data = self.dataf.read(end - start)
209 findend = end - start
209 findend = end - start
210 while True:
210 while True:
211 # we're searching backwards, so we have to make sure
211 # we're searching backwards, so we have to make sure
212 # we don't find a changeset where this node is a parent
212 # we don't find a changeset where this node is a parent
213 off = data.find(node, 0, findend)
213 off = data.find(node, 0, findend)
214 findend = off
214 findend = off
215 if off >= 0:
215 if off >= 0:
216 i = off / self.s
216 i = off / self.s
217 off = i * self.s
217 off = i * self.s
218 n = data[off + ngshaoffset:off + ngshaoffset + 20]
218 n = data[off + ngshaoffset:off + ngshaoffset + 20]
219 if n == node:
219 if n == node:
220 self.map[n] = i + start / self.s
220 self.map[n] = i + start / self.s
221 return node
221 return node
222 else:
222 else:
223 break
223 break
224 end -= blocksize
224 end -= blocksize
225 return None
225 return None
226
226
227 def loadindex(self, i=None, end=None):
227 def loadindex(self, i=None, end=None):
228 if self.all:
228 if self.all:
229 return
229 return
230 all = False
230 all = False
231 if i == None:
231 if i == None:
232 blockstart = 0
232 blockstart = 0
233 blocksize = (65536 / self.s) * self.s
233 blocksize = (65536 / self.s) * self.s
234 end = self.datasize
234 end = self.datasize
235 all = True
235 all = True
236 else:
236 else:
237 if end:
237 if end:
238 blockstart = i * self.s
238 blockstart = i * self.s
239 end = end * self.s
239 end = end * self.s
240 blocksize = end - blockstart
240 blocksize = end - blockstart
241 else:
241 else:
242 blockstart = (i & ~1023) * self.s
242 blockstart = (i & ~1023) * self.s
243 blocksize = self.s * 1024
243 blocksize = self.s * 1024
244 end = blockstart + blocksize
244 end = blockstart + blocksize
245 while blockstart < end:
245 while blockstart < end:
246 self.loadblock(blockstart, blocksize)
246 self.loadblock(blockstart, blocksize)
247 blockstart += blocksize
247 blockstart += blocksize
248 if all:
248 if all:
249 self.all = True
249 self.all = True
250
250
251 class lazyindex(object):
251 class lazyindex(object):
252 """a lazy version of the index array"""
252 """a lazy version of the index array"""
253 def __init__(self, parser):
253 def __init__(self, parser):
254 self.p = parser
254 self.p = parser
255 def __len__(self):
255 def __len__(self):
256 return len(self.p.index)
256 return len(self.p.index)
257 def load(self, pos):
257 def load(self, pos):
258 if pos < 0:
258 if pos < 0:
259 pos += len(self.p.index)
259 pos += len(self.p.index)
260 self.p.loadindex(pos)
260 self.p.loadindex(pos)
261 return self.p.index[pos]
261 return self.p.index[pos]
262 def __getitem__(self, pos):
262 def __getitem__(self, pos):
263 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
263 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
264 def __setitem__(self, pos, item):
264 def __setitem__(self, pos, item):
265 self.p.index[pos] = _pack(indexformatng, *item)
265 self.p.index[pos] = _pack(indexformatng, *item)
266 def __delitem__(self, pos):
266 def __delitem__(self, pos):
267 del self.p.index[pos]
267 del self.p.index[pos]
268 def insert(self, pos, e):
268 def insert(self, pos, e):
269 self.p.index.insert(pos, _pack(indexformatng, *e))
269 self.p.index.insert(pos, _pack(indexformatng, *e))
270 def append(self, e):
270 def append(self, e):
271 self.p.index.append(_pack(indexformatng, *e))
271 self.p.index.append(_pack(indexformatng, *e))
272
272
273 class lazymap(object):
273 class lazymap(object):
274 """a lazy version of the node map"""
274 """a lazy version of the node map"""
275 def __init__(self, parser):
275 def __init__(self, parser):
276 self.p = parser
276 self.p = parser
277 def load(self, key):
277 def load(self, key):
278 n = self.p.findnode(key)
278 n = self.p.findnode(key)
279 if n == None:
279 if n == None:
280 raise KeyError(key)
280 raise KeyError(key)
281 def __contains__(self, key):
281 def __contains__(self, key):
282 if key in self.p.map:
282 if key in self.p.map:
283 return True
283 return True
284 self.p.loadmap()
284 self.p.loadmap()
285 return key in self.p.map
285 return key in self.p.map
286 def __iter__(self):
286 def __iter__(self):
287 yield nullid
287 yield nullid
288 for i in xrange(self.p.l):
288 for i in xrange(self.p.l):
289 ret = self.p.index[i]
289 ret = self.p.index[i]
290 if not ret:
290 if not ret:
291 self.p.loadindex(i)
291 self.p.loadindex(i)
292 ret = self.p.index[i]
292 ret = self.p.index[i]
293 if isinstance(ret, str):
293 if isinstance(ret, str):
294 ret = _unpack(indexformatng, ret)
294 ret = _unpack(indexformatng, ret)
295 yield ret[7]
295 yield ret[7]
296 def __getitem__(self, key):
296 def __getitem__(self, key):
297 try:
297 try:
298 return self.p.map[key]
298 return self.p.map[key]
299 except KeyError:
299 except KeyError:
300 try:
300 try:
301 self.load(key)
301 self.load(key)
302 return self.p.map[key]
302 return self.p.map[key]
303 except KeyError:
303 except KeyError:
304 raise KeyError("node " + hex(key))
304 raise KeyError("node " + hex(key))
305 def __setitem__(self, key, val):
305 def __setitem__(self, key, val):
306 self.p.map[key] = val
306 self.p.map[key] = val
307 def __delitem__(self, key):
307 def __delitem__(self, key):
308 del self.p.map[key]
308 del self.p.map[key]
309
309
310 indexformatv0 = ">4l20s20s20s"
310 indexformatv0 = ">4l20s20s20s"
311 v0shaoffset = 56
311 v0shaoffset = 56
312
312
313 class revlogoldio(object):
313 class revlogoldio(object):
314 def __init__(self):
314 def __init__(self):
315 self.size = struct.calcsize(indexformatv0)
315 self.size = struct.calcsize(indexformatv0)
316
316
317 def parseindex(self, fp, inline):
317 def parseindex(self, fp, inline):
318 s = self.size
318 s = self.size
319 index = []
319 index = []
320 nodemap = {nullid: nullrev}
320 nodemap = {nullid: nullrev}
321 n = off = 0
321 n = off = 0
322 data = fp.read()
322 data = fp.read()
323 l = len(data)
323 l = len(data)
324 while off + s <= l:
324 while off + s <= l:
325 cur = data[off:off + s]
325 cur = data[off:off + s]
326 off += s
326 off += s
327 e = _unpack(indexformatv0, cur)
327 e = _unpack(indexformatv0, cur)
328 # transform to revlogv1 format
328 # transform to revlogv1 format
329 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
329 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
330 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
330 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
331 index.append(e2)
331 index.append(e2)
332 nodemap[e[6]] = n
332 nodemap[e[6]] = n
333 n += 1
333 n += 1
334
334
335 return index, nodemap, None
335 return index, nodemap, None
336
336
337 def packentry(self, entry, node, version, rev):
337 def packentry(self, entry, node, version, rev):
338 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
338 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
339 node(entry[5]), node(entry[6]), entry[7])
339 node(entry[5]), node(entry[6]), entry[7])
340 return _pack(indexformatv0, *e2)
340 return _pack(indexformatv0, *e2)
341
341
342 # index ng:
342 # index ng:
343 # 6 bytes offset
343 # 6 bytes offset
344 # 2 bytes flags
344 # 2 bytes flags
345 # 4 bytes compressed length
345 # 4 bytes compressed length
346 # 4 bytes uncompressed length
346 # 4 bytes uncompressed length
347 # 4 bytes: base rev
347 # 4 bytes: base rev
348 # 4 bytes link rev
348 # 4 bytes link rev
349 # 4 bytes parent 1 rev
349 # 4 bytes parent 1 rev
350 # 4 bytes parent 2 rev
350 # 4 bytes parent 2 rev
351 # 32 bytes: nodeid
351 # 32 bytes: nodeid
352 indexformatng = ">Qiiiiii20s12x"
352 indexformatng = ">Qiiiiii20s12x"
353 ngshaoffset = 32
353 ngshaoffset = 32
354 versionformat = ">I"
354 versionformat = ">I"
355
355
356 class revlogio(object):
356 class revlogio(object):
357 def __init__(self):
357 def __init__(self):
358 self.size = struct.calcsize(indexformatng)
358 self.size = struct.calcsize(indexformatng)
359
359
360 def parseindex(self, fp, inline):
360 def parseindex(self, fp, inline):
361 try:
361 try:
362 size = util.fstat(fp).st_size
362 size = util.fstat(fp).st_size
363 except AttributeError:
363 except AttributeError:
364 size = 0
364 size = 0
365
365
366 if util.openhardlinks() and not inline and size > 1000000:
366 if util.openhardlinks() and not inline and size > 1000000:
367 # big index, let's parse it on demand
367 # big index, let's parse it on demand
368 parser = lazyparser(fp, size)
368 parser = lazyparser(fp, size)
369 index = lazyindex(parser)
369 index = lazyindex(parser)
370 nodemap = lazymap(parser)
370 nodemap = lazymap(parser)
371 e = list(index[0])
371 e = list(index[0])
372 type = gettype(e[0])
372 type = gettype(e[0])
373 e[0] = offset_type(0, type)
373 e[0] = offset_type(0, type)
374 index[0] = e
374 index[0] = e
375 return index, nodemap, None
375 return index, nodemap, None
376
376
377 data = fp.read()
377 data = fp.read()
378 # call the C implementation to parse the index data
378 # call the C implementation to parse the index data
379 index, nodemap, cache = parsers.parse_index(data, inline)
379 index, nodemap, cache = parsers.parse_index(data, inline)
380 return index, nodemap, cache
380 return index, nodemap, cache
381
381
382 def packentry(self, entry, node, version, rev):
382 def packentry(self, entry, node, version, rev):
383 p = _pack(indexformatng, *entry)
383 p = _pack(indexformatng, *entry)
384 if rev == 0:
384 if rev == 0:
385 p = _pack(versionformat, version) + p[4:]
385 p = _pack(versionformat, version) + p[4:]
386 return p
386 return p
387
387
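# packentry()'s rev-0 special case works because an index entry opens
# with the 8-byte big-endian offset/flags word and revision 0 always
# sits at offset 0: the high 4 bytes are zero, so the version header
# can live there instead (the lazy parseindex path above restores
# offset 0 when loading). A sketch built on the null-entry layout;
# struct is already imported at the top of this module:
def _version_overlay_sketch():
    entry0 = struct.pack(indexformatng, 0, 0, 0, -1, -1, -1, -1, '\0' * 20)
    withver = struct.pack(versionformat, REVLOG_DEFAULT_VERSION) + entry0[4:]
    assert len(withver) == len(entry0) == 64
    assert struct.unpack(versionformat, withver[:4])[0] == REVLOG_DEFAULT_VERSION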
388 class revlog(object):
388 class revlog(object):
389 """
389 """
390 the underlying revision storage object
390 the underlying revision storage object
391
391
392 A revlog consists of two parts, an index and the revision data.
392 A revlog consists of two parts, an index and the revision data.
393
393
394 The index is a file with a fixed record size containing
394 The index is a file with a fixed record size containing
395 information on each revision, including its nodeid (hash), the
395 information on each revision, including its nodeid (hash), the
396 nodeids of its parents, the position and offset of its data within
396 nodeids of its parents, the position and offset of its data within
397 the data file, and the revision it's based on. Finally, each entry
397 the data file, and the revision it's based on. Finally, each entry
398 contains a linkrev entry that can serve as a pointer to external
398 contains a linkrev entry that can serve as a pointer to external
399 data.
399 data.
400
400
401 The revision data itself is a linear collection of data chunks.
401 The revision data itself is a linear collection of data chunks.
402 Each chunk represents a revision and is usually represented as a
402 Each chunk represents a revision and is usually represented as a
403 delta against the previous chunk. To bound lookup time, runs of
403 delta against the previous chunk. To bound lookup time, runs of
404 deltas are limited to about 2 times the length of the original
404 deltas are limited to about 2 times the length of the original
405 version data. This makes retrieval of a version proportional to
405 version data. This makes retrieval of a version proportional to
406 its size, or O(1) relative to the number of revisions.
406 its size, or O(1) relative to the number of revisions.
407
407
408 Both pieces of the revlog are written to in an append-only
408 Both pieces of the revlog are written to in an append-only
409 fashion, which means we never need to rewrite a file to insert or
409 fashion, which means we never need to rewrite a file to insert or
410 remove data, and can use some simple techniques to avoid the need
410 remove data, and can use some simple techniques to avoid the need
411 for locking while reading.
411 for locking while reading.
412 """
412 """
413 def __init__(self, opener, indexfile):
413 def __init__(self, opener, indexfile):
414 """
414 """
415 create a revlog object
415 create a revlog object
416
416
417 opener is a function that abstracts the file opening operation
417 opener is a function that abstracts the file opening operation
418 and can be used to implement COW semantics or the like.
418 and can be used to implement COW semantics or the like.
419 """
419 """
420 self.indexfile = indexfile
420 self.indexfile = indexfile
421 self.datafile = indexfile[:-2] + ".d"
421 self.datafile = indexfile[:-2] + ".d"
422 self.opener = opener
422 self.opener = opener
423 self._cache = None
423 self._cache = None
424 self._chunkcache = None
424 self._chunkcache = None
425 self.nodemap = {nullid: nullrev}
425 self.nodemap = {nullid: nullrev}
426 self.index = []
426 self.index = []
427
427
428 v = REVLOG_DEFAULT_VERSION
428 v = REVLOG_DEFAULT_VERSION
429 if hasattr(opener, "defversion"):
429 if hasattr(opener, "defversion"):
430 v = opener.defversion
430 v = opener.defversion
431 if v & REVLOGNG:
431 if v & REVLOGNG:
432 v |= REVLOGNGINLINEDATA
432 v |= REVLOGNGINLINEDATA
433
433
434 i = ""
434 i = ""
435 try:
435 try:
436 f = self.opener(self.indexfile)
436 f = self.opener(self.indexfile)
437 i = f.read(4)
437 i = f.read(4)
438 f.seek(0)
438 f.seek(0)
439 if len(i) > 0:
439 if len(i) > 0:
440 v = struct.unpack(versionformat, i)[0]
440 v = struct.unpack(versionformat, i)[0]
441 except IOError, inst:
441 except IOError, inst:
442 if inst.errno != errno.ENOENT:
442 if inst.errno != errno.ENOENT:
443 raise
443 raise
444
444
445 self.version = v
445 self.version = v
446 self._inline = v & REVLOGNGINLINEDATA
446 self._inline = v & REVLOGNGINLINEDATA
447 flags = v & ~0xFFFF
447 flags = v & ~0xFFFF
448 fmt = v & 0xFFFF
448 fmt = v & 0xFFFF
449 if fmt == REVLOGV0 and flags:
449 if fmt == REVLOGV0 and flags:
450 raise RevlogError(_("index %s unknown flags %#04x for format v0")
450 raise RevlogError(_("index %s unknown flags %#04x for format v0")
451 % (self.indexfile, flags >> 16))
451 % (self.indexfile, flags >> 16))
452 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
452 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
453 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
453 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
454 % (self.indexfile, flags >> 16))
454 % (self.indexfile, flags >> 16))
455 elif fmt > REVLOGNG:
455 elif fmt > REVLOGNG:
456 raise RevlogError(_("index %s unknown format %d")
456 raise RevlogError(_("index %s unknown format %d")
457 % (self.indexfile, fmt))
457 % (self.indexfile, fmt))
458
458
459 self._io = revlogio()
459 self._io = revlogio()
460 if self.version == REVLOGV0:
460 if self.version == REVLOGV0:
461 self._io = revlogoldio()
461 self._io = revlogoldio()
462 if i:
462 if i:
463 d = self._io.parseindex(f, self._inline)
463 d = self._io.parseindex(f, self._inline)
464 self.index, self.nodemap, self._chunkcache = d
464 self.index, self.nodemap, self._chunkcache = d
465
465
466 # add the magic null revision at -1 (if it hasn't been done already)
466 # add the magic null revision at -1 (if it hasn't been done already)
467 if (self.index == [] or isinstance(self.index, lazyindex) or
467 if (self.index == [] or isinstance(self.index, lazyindex) or
468 self.index[-1][7] != nullid) :
468 self.index[-1][7] != nullid) :
469 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
469 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
470
470
471 def _loadindex(self, start, end):
471 def _loadindex(self, start, end):
472 """load a block of indexes all at once from the lazy parser"""
472 """load a block of indexes all at once from the lazy parser"""
473 if isinstance(self.index, lazyindex):
473 if isinstance(self.index, lazyindex):
474 self.index.p.loadindex(start, end)
474 self.index.p.loadindex(start, end)
475
475
476 def _loadindexmap(self):
476 def _loadindexmap(self):
477 """loads both the map and the index from the lazy parser"""
477 """loads both the map and the index from the lazy parser"""
478 if isinstance(self.index, lazyindex):
478 if isinstance(self.index, lazyindex):
479 p = self.index.p
479 p = self.index.p
480 p.loadindex()
480 p.loadindex()
481 self.nodemap = p.map
481 self.nodemap = p.map
482
482
483 def _loadmap(self):
483 def _loadmap(self):
484 """loads the map from the lazy parser"""
484 """loads the map from the lazy parser"""
485 if isinstance(self.nodemap, lazymap):
485 if isinstance(self.nodemap, lazymap):
486 self.nodemap.p.loadmap()
486 self.nodemap.p.loadmap()
487 self.nodemap = self.nodemap.p.map
487 self.nodemap = self.nodemap.p.map
488
488
489 def tip(self):
489 def tip(self):
490 return self.node(len(self.index) - 2)
490 return self.node(len(self.index) - 2)
491 def __len__(self):
491 def __len__(self):
492 return len(self.index) - 1
492 return len(self.index) - 1
493 def __iter__(self):
493 def __iter__(self):
494 for i in xrange(len(self)):
494 for i in xrange(len(self)):
495 yield i
495 yield i
496 def rev(self, node):
496 def rev(self, node):
497 try:
497 try:
498 return self.nodemap[node]
498 return self.nodemap[node]
499 except KeyError:
499 except KeyError:
500 raise LookupError(node, self.indexfile, _('no node'))
500 raise LookupError(node, self.indexfile, _('no node'))
501 def node(self, rev):
501 def node(self, rev):
502 return self.index[rev][7]
502 return self.index[rev][7]
503 def linkrev(self, node):
503 def linkrev(self, node):
504 return self.index[self.rev(node)][4]
504 return self.index[self.rev(node)][4]
505 def parents(self, node):
505 def parents(self, node):
506 d = self.index[self.rev(node)][5:7]
506 d = self.index[self.rev(node)][5:7]
507 return (self.node(d[0]), self.node(d[1]))
507 return (self.node(d[0]), self.node(d[1]))
508 def parentrevs(self, rev):
508 def parentrevs(self, rev):
509 return self.index[rev][5:7]
509 return self.index[rev][5:7]
510 def start(self, rev):
510 def start(self, rev):
511 return int(self.index[rev][0] >> 16)
511 return int(self.index[rev][0] >> 16)
512 def end(self, rev):
512 def end(self, rev):
513 return self.start(rev) + self.length(rev)
513 return self.start(rev) + self.length(rev)
514 def length(self, rev):
514 def length(self, rev):
515 return self.index[rev][1]
515 return self.index[rev][1]
516 def base(self, rev):
516 def base(self, rev):
517 return self.index[rev][3]
517 return self.index[rev][3]
518
518
519 def size(self, rev):
519 def size(self, rev):
520 """return the length of the uncompressed text for a given revision"""
520 """return the length of the uncompressed text for a given revision"""
521 l = self.index[rev][2]
521 l = self.index[rev][2]
522 if l >= 0:
522 if l >= 0:
523 return l
523 return l
524
524
525 t = self.revision(self.node(rev))
525 t = self.revision(self.node(rev))
526 return len(t)
526 return len(t)
527
527
528 # alternate implementation. The advantage of this code is that it
528 # alternate implementation. The advantage of this code is that it
529 # will be faster for a single revision. But the results are not
529 # will be faster for a single revision. But the results are not
530 # cached, so finding the size of every revision will be slower.
530 # cached, so finding the size of every revision will be slower.
531 """
531 """
532 if self.cache and self.cache[1] == rev:
532 if self.cache and self.cache[1] == rev:
533 return len(self.cache[2])
533 return len(self.cache[2])
534
534
535 base = self.base(rev)
535 base = self.base(rev)
536 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
536 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
537 base = self.cache[1]
537 base = self.cache[1]
538 text = self.cache[2]
538 text = self.cache[2]
539 else:
539 else:
540 text = self.revision(self.node(base))
540 text = self.revision(self.node(base))
541
541
542 l = len(text)
542 l = len(text)
543 for x in xrange(base + 1, rev + 1):
543 for x in xrange(base + 1, rev + 1):
544 l = mdiff.patchedsize(l, self.chunk(x))
544 l = mdiff.patchedsize(l, self.chunk(x))
545 return l
545 return l
546 """
546 """
547
547
548 def reachable(self, node, stop=None):
548 def reachable(self, node, stop=None):
549 """return a hash of all nodes ancestral to a given node, including
549 """return a hash of all nodes ancestral to a given node, including
550 the node itself, stopping when stop is matched"""
550 the node itself, stopping when stop is matched"""
551 reachable = {}
551 reachable = {}
552 visit = [node]
552 visit = [node]
553 reachable[node] = 1
553 reachable[node] = 1
554 if stop:
554 if stop:
555 stopn = self.rev(stop)
555 stopn = self.rev(stop)
556 else:
556 else:
557 stopn = 0
557 stopn = 0
558 while visit:
558 while visit:
559 n = visit.pop(0)
559 n = visit.pop(0)
560 if n == stop:
560 if n == stop:
561 continue
561 continue
562 if n == nullid:
562 if n == nullid:
563 continue
563 continue
564 for p in self.parents(n):
564 for p in self.parents(n):
565 if self.rev(p) < stopn:
565 if self.rev(p) < stopn:
566 continue
566 continue
567 if p not in reachable:
567 if p not in reachable:
568 reachable[p] = 1
568 reachable[p] = 1
569 visit.append(p)
569 visit.append(p)
570 return reachable
570 return reachable
571
571
572 def ancestors(self, *revs):
572 def ancestors(self, *revs):
573 'Generate the ancestors of revs using a breadth-first visit'
573 'Generate the ancestors of revs using a breadth-first visit'
574 visit = list(revs)
574 visit = list(revs)
575 seen = util.set([nullrev])
575 seen = util.set([nullrev])
576 while visit:
576 while visit:
577 for parent in self.parentrevs(visit.pop(0)):
577 for parent in self.parentrevs(visit.pop(0)):
578 if parent not in seen:
578 if parent not in seen:
579 visit.append(parent)
579 visit.append(parent)
580 seen.add(parent)
580 seen.add(parent)
581 yield parent
581 yield parent
582
582
583 def descendants(self, *revs):
583 def descendants(self, *revs):
584 'Generate the descendants of revs in topological order'
584 'Generate the descendants of revs in topological order'
585 seen = util.set(revs)
585 seen = util.set(revs)
586 for i in xrange(min(revs) + 1, len(self)):
586 for i in xrange(min(revs) + 1, len(self)):
587 for x in self.parentrevs(i):
587 for x in self.parentrevs(i):
588 if x != nullrev and x in seen:
588 if x != nullrev and x in seen:
589 seen.add(i)
589 seen.add(i)
590 yield i
590 yield i
591 break
591 break
592
592
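# Both generators above yield revision numbers, not node ids, and they
# omit the starting revs themselves. For a linear revlog 0 <- 1 <- 2
# (rl being an illustrative revlog instance):
#     list(rl.ancestors(2))    -> [1, 0]
#     list(rl.descendants(0))  -> [1, 2]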
593 def findmissing(self, common=None, heads=None):
594 '''
595 returns the topologically sorted list of nodes from the set:
596 missing = (ancestors(heads) \ ancestors(common))
597
598 where ancestors() is the set of ancestors from heads, heads included
599
600 if heads is None, the heads of the revlog are used
601 if common is None, nullid is assumed to be a common node
602 '''
603 if common is None:
604 common = [nullid]
605 if heads is None:
606 heads = self.heads()
607
608 common = [self.rev(n) for n in common]
609 heads = [self.rev(n) for n in heads]
610
611 # we want the ancestors, but inclusive
612 has = dict.fromkeys(self.ancestors(*common))
613 has[nullrev] = None
614 for r in common:
615 has[r] = None
616
617 # take all ancestors from heads that aren't in has
618 missing = {}
619 visit = [r for r in heads if r not in has]
620 while visit:
621 r = visit.pop(0)
622 if r in missing:
623 continue
624 else:
625 missing[r] = None
626 for p in self.parentrevs(r):
627 if p not in has:
628 visit.append(p)
629 missing = missing.keys()
630 missing.sort()
631 return [self.node(r) for r in missing]
632
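# A usage sketch for findmissing(); repo and remoteheads are
# illustrative names. This is exactly what _changegroup() in
# localrepo.py feeds to cl.group():
def _outgoing_sketch(repo, remoteheads):
    # nodes we have that a remote holding `remoteheads` lacks,
    # in topological order
    return repo.changelog.findmissing(common=remoteheads)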
593 def nodesbetween(self, roots=None, heads=None):
633 def nodesbetween(self, roots=None, heads=None):
594 """Return a tuple containing three elements. Elements 1 and 2 contain
634 """Return a tuple containing three elements. Elements 1 and 2 contain
595 the final lists of bases and heads after all the unreachable ones have been
635 the final lists of bases and heads after all the unreachable ones have been
596 pruned. Element 0 contains a topologically sorted list of all
636 pruned. Element 0 contains a topologically sorted list of all
597
637
598 nodes that satisfy these constraints:
638 nodes that satisfy these constraints:
599 1. All nodes must be descended from a node in roots (the nodes on
639 1. All nodes must be descended from a node in roots (the nodes on
600 roots are considered descended from themselves).
640 roots are considered descended from themselves).
601 2. All nodes must also be ancestors of a node in heads (the nodes in
641 2. All nodes must also be ancestors of a node in heads (the nodes in
602 heads are considered to be their own ancestors).
642 heads are considered to be their own ancestors).
603
643
604 If roots is unspecified, nullid is assumed as the only root.
644 If roots is unspecified, nullid is assumed as the only root.
605 If heads is unspecified, it is taken to be the output of the
645 If heads is unspecified, it is taken to be the output of the
606 heads method (i.e. a list of all nodes in the repository that
646 heads method (i.e. a list of all nodes in the repository that
607 have no children)."""
647 have no children)."""
608 nonodes = ([], [], [])
648 nonodes = ([], [], [])
609 if roots is not None:
649 if roots is not None:
610 roots = list(roots)
650 roots = list(roots)
611 if not roots:
651 if not roots:
612 return nonodes
652 return nonodes
613 lowestrev = min([self.rev(n) for n in roots])
653 lowestrev = min([self.rev(n) for n in roots])
614 else:
654 else:
615 roots = [nullid] # Everybody's a descendant of nullid
655 roots = [nullid] # Everybody's a descendant of nullid
616 lowestrev = nullrev
656 lowestrev = nullrev
617 if (lowestrev == nullrev) and (heads is None):
657 if (lowestrev == nullrev) and (heads is None):
618 # We want _all_ the nodes!
658 # We want _all_ the nodes!
619 return ([self.node(r) for r in self], [nullid], list(self.heads()))
659 return ([self.node(r) for r in self], [nullid], list(self.heads()))
620 if heads is None:
660 if heads is None:
621 # All nodes are ancestors, so the latest ancestor is the last
661 # All nodes are ancestors, so the latest ancestor is the last
622 # node.
662 # node.
623 highestrev = len(self) - 1
663 highestrev = len(self) - 1
624 # Set ancestors to None to signal that every node is an ancestor.
664 # Set ancestors to None to signal that every node is an ancestor.
625 ancestors = None
665 ancestors = None
626 # Set heads to an empty dictionary for later discovery of heads
666 # Set heads to an empty dictionary for later discovery of heads
627 heads = {}
667 heads = {}
628 else:
668 else:
629 heads = list(heads)
669 heads = list(heads)
630 if not heads:
670 if not heads:
631 return nonodes
671 return nonodes
632 ancestors = {}
672 ancestors = {}
633 # Turn heads into a dictionary so we can remove 'fake' heads.
673 # Turn heads into a dictionary so we can remove 'fake' heads.
634 # Also, later we will be using it to filter out the heads we can't
674 # Also, later we will be using it to filter out the heads we can't
635 # find from roots.
675 # find from roots.
636 heads = dict.fromkeys(heads, 0)
676 heads = dict.fromkeys(heads, 0)
637 # Start at the top and keep marking parents until we're done.
677 # Start at the top and keep marking parents until we're done.
638 nodestotag = heads.keys()
678 nodestotag = heads.keys()
639 # Remember where the top was so we can use it as a limit later.
679 # Remember where the top was so we can use it as a limit later.
640 highestrev = max([self.rev(n) for n in nodestotag])
680 highestrev = max([self.rev(n) for n in nodestotag])
641 while nodestotag:
681 while nodestotag:
642 # grab a node to tag
682 # grab a node to tag
643 n = nodestotag.pop()
683 n = nodestotag.pop()
644 # Never tag nullid
684 # Never tag nullid
645 if n == nullid:
685 if n == nullid:
646 continue
686 continue
647 # A node's revision number represents its place in a
687 # A node's revision number represents its place in a
648 # topologically sorted list of nodes.
688 # topologically sorted list of nodes.
649 r = self.rev(n)
689 r = self.rev(n)
650 if r >= lowestrev:
690 if r >= lowestrev:
651 if n not in ancestors:
691 if n not in ancestors:
652 # If we are possibly a descendent of one of the roots
692 # If we are possibly a descendent of one of the roots
653 # and we haven't already been marked as an ancestor
693 # and we haven't already been marked as an ancestor
654 ancestors[n] = 1 # Mark as ancestor
694 ancestors[n] = 1 # Mark as ancestor
655 # Add non-nullid parents to list of nodes to tag.
695 # Add non-nullid parents to list of nodes to tag.
656 nodestotag.extend([p for p in self.parents(n) if
696 nodestotag.extend([p for p in self.parents(n) if
657 p != nullid])
697 p != nullid])
658 elif n in heads: # We've seen it before, is it a fake head?
698 elif n in heads: # We've seen it before, is it a fake head?
659 # So it is, real heads should not be the ancestors of
699 # So it is, real heads should not be the ancestors of
660 # any other heads.
700 # any other heads.
661 heads.pop(n)
701 heads.pop(n)
662 if not ancestors:
702 if not ancestors:
663 return nonodes
703 return nonodes
664 # Now that we have our set of ancestors, we want to remove any
704 # Now that we have our set of ancestors, we want to remove any
665 # roots that are not ancestors.
705 # roots that are not ancestors.
666
706
667 # If one of the roots was nullid, everything is included anyway.
707 # If one of the roots was nullid, everything is included anyway.
668 if lowestrev > nullrev:
708 if lowestrev > nullrev:
669 # But, since we weren't, let's recompute the lowest rev to not
709 # But, since we weren't, let's recompute the lowest rev to not
670 # include roots that aren't ancestors.
710 # include roots that aren't ancestors.
671
711
672 # Filter out roots that aren't ancestors of heads
712 # Filter out roots that aren't ancestors of heads
673 roots = [n for n in roots if n in ancestors]
713 roots = [n for n in roots if n in ancestors]
674 # Recompute the lowest revision
714 # Recompute the lowest revision
675 if roots:
715 if roots:
676 lowestrev = min([self.rev(n) for n in roots])
716 lowestrev = min([self.rev(n) for n in roots])
677 else:
717 else:
678 # No more roots? Return empty list
718 # No more roots? Return empty list
679 return nonodes
719 return nonodes
680 else:
720 else:
681 # We are descending from nullid, and don't need to care about
721 # We are descending from nullid, and don't need to care about
682 # any other roots.
722 # any other roots.
683 lowestrev = nullrev
723 lowestrev = nullrev
684 roots = [nullid]
724 roots = [nullid]
685 # Transform our roots list into a 'set' (i.e. a dictionary where the
725 # Transform our roots list into a 'set' (i.e. a dictionary where the
686 # values don't matter.
726 # values don't matter.
687 descendents = dict.fromkeys(roots, 1)
727 descendents = dict.fromkeys(roots, 1)
688 # Also, keep the original roots so we can filter out roots that aren't
728 # Also, keep the original roots so we can filter out roots that aren't
689 # 'real' roots (i.e. are descended from other roots).
729 # 'real' roots (i.e. are descended from other roots).
690 roots = descendents.copy()
730 roots = descendents.copy()
691 # Our topologically sorted list of output nodes.
731 # Our topologically sorted list of output nodes.
692 orderedout = []
732 orderedout = []
693 # Don't start at nullid since we don't want nullid in our output list,
733 # Don't start at nullid since we don't want nullid in our output list,
694 # and if nullid shows up in descedents, empty parents will look like
734 # and if nullid shows up in descedents, empty parents will look like
695 # they're descendents.
735 # they're descendents.
696 for r in xrange(max(lowestrev, 0), highestrev + 1):
736 for r in xrange(max(lowestrev, 0), highestrev + 1):
697 n = self.node(r)
737 n = self.node(r)
698 isdescendent = False
738 isdescendent = False
699 if lowestrev == nullrev: # Everybody is a descendent of nullid
739 if lowestrev == nullrev: # Everybody is a descendent of nullid
700 isdescendent = True
740 isdescendent = True
701 elif n in descendents:
741 elif n in descendents:
702 # n is already a descendent
742 # n is already a descendent
703 isdescendent = True
743 isdescendent = True
704 # This check only needs to be done here because all the roots
744 # This check only needs to be done here because all the roots
705 # will start being marked is descendents before the loop.
745 # will start being marked is descendents before the loop.
706 if n in roots:
746 if n in roots:
707 # If n was a root, check if it's a 'real' root.
747 # If n was a root, check if it's a 'real' root.
708 p = tuple(self.parents(n))
748 p = tuple(self.parents(n))
709 # If any of its parents are descendents, it's not a root.
749 # If any of its parents are descendents, it's not a root.
710 if (p[0] in descendents) or (p[1] in descendents):
750 if (p[0] in descendents) or (p[1] in descendents):
711 roots.pop(n)
751 roots.pop(n)
712 else:
752 else:
713 p = tuple(self.parents(n))
753 p = tuple(self.parents(n))
714 # A node is a descendent if either of its parents are
754 # A node is a descendent if either of its parents are
715 # descendents. (We seeded the dependents list with the roots
755 # descendents. (We seeded the dependents list with the roots
716 # up there, remember?)
756 # up there, remember?)
717 if (p[0] in descendents) or (p[1] in descendents):
757 if (p[0] in descendents) or (p[1] in descendents):
718 descendents[n] = 1
758 descendents[n] = 1
719 isdescendent = True
759 isdescendent = True
720 if isdescendent and ((ancestors is None) or (n in ancestors)):
760 if isdescendent and ((ancestors is None) or (n in ancestors)):
721 # Only include nodes that are both descendents and ancestors.
761 # Only include nodes that are both descendents and ancestors.
722 orderedout.append(n)
762 orderedout.append(n)
723 if (ancestors is not None) and (n in heads):
763 if (ancestors is not None) and (n in heads):
724 # We're trying to figure out which heads are reachable
764 # We're trying to figure out which heads are reachable
725 # from roots.
765 # from roots.
726 # Mark this head as having been reached
766 # Mark this head as having been reached
727 heads[n] = 1
767 heads[n] = 1
728 elif ancestors is None:
768 elif ancestors is None:
729 # Otherwise, we're trying to discover the heads.
769 # Otherwise, we're trying to discover the heads.
730 # Assume this is a head because if it isn't, the next step
770 # Assume this is a head because if it isn't, the next step
731 # will eventually remove it.
771 # will eventually remove it.
732 heads[n] = 1
772 heads[n] = 1
733 # But, obviously its parents aren't.
773 # But, obviously its parents aren't.
734 for p in self.parents(n):
774 for p in self.parents(n):
735 heads.pop(p, None)
775 heads.pop(p, None)
736 heads = [n for n in heads.iterkeys() if heads[n] != 0]
776 heads = [n for n in heads.iterkeys() if heads[n] != 0]
737 roots = roots.keys()
777 roots = roots.keys()
738 assert orderedout
778 assert orderedout
739 assert roots
779 assert roots
740 assert heads
780 assert heads
741 return (orderedout, roots, heads)
781 return (orderedout, roots, heads)
742
782
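To make the contract above concrete, here is a hedged usage sketch; the revlog instance `cl` and the nodes `start` and `tip` are assumptions for illustration, not part of this changeset.

    # Hedged sketch: 'cl' is assumed to be a revlog (e.g. a changelog)
    # that already contains the binary nodes 'start' and 'tip'.
    orderedout, realroots, reachedheads = cl.nodesbetween(roots=[start],
                                                          heads=[tip])
    # orderedout   - topologically sorted nodes between roots and heads
    # realroots    - the given roots that survive pruning (not descended
    #                from another given root)
    # reachedheads - the given heads actually reachable from the roots
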
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            count = len(self)
            if not count:
                return [nullid]
            ishead = [1] * (count + 1)
            index = self.index
            for r in xrange(count):
                e = index[r]
                ishead[e[5]] = ishead[e[6]] = 0
            return [self.node(r) for r in xrange(count) if ishead[r]]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = dict.fromkeys([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev: 1}
        heads = {startrev: 1}

        parentrevs = self.parentrevs
        for r in xrange(startrev + 1, len(self)):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable[r] = 1
                        heads[r] = 1
                    if p in heads and p not in stoprevs:
                        del heads[p]

        return [self.node(r) for r in heads]

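The fast path above simply clears a "head" bit for every revision that some later revision names as a parent. A standalone sketch of the same technique over a generic parent function (the names here are hypothetical):

    def dagheads(parentrevs, count, nullrev=-1):
        # A revision is a head unless a later revision names it as a parent.
        ishead = [True] * count
        for r in range(count):
            for p in parentrevs(r):
                if p != nullrev:
                    ishead[p] = False
        return [r for r in range(count) if ishead[r]]

    # e.g. a linear history 0 <- 1 <- 2 has the single head 2:
    assert dagheads(lambda r: (r - 1, -1), 3) == [2]
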
    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, len(self)):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def _match(self, id):
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                r = self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                r = self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

    def _partialmatch(self, id):
        if len(id) < 40:
            try:
                # hex(node)[:...]
                bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
                node = None
                for n in self.nodemap:
                    if n.startswith(bin_id) and hex(n).startswith(id):
                        if node is not None:
                            raise LookupError(id, self.indexfile,
                                              _('ambiguous identifier'))
                        node = n
                if node is not None:
                    return node
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

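lookup therefore accepts several spellings of the same revision. A hedged sketch of that contract; the revlog instance `r` is assumed, and `hex` is the helper from the node module used elsewhere in this file:

    # Hedged sketch: 'r' is assumed to be a revlog with at least one revision.
    n = r.lookup(0)                   # revision number
    assert r.lookup("0") == n         # str(revision number)
    assert r.lookup(hex(n)) == n      # full 40-character hex nodeid
    assert r.lookup(hex(n)[:6]) == n  # hex prefix, if unambiguous
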
    def cmp(self, node, text):
        """compare text with a given file revision"""
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def chunk(self, rev, df=None):
        def loadcache(df):
            if not df:
                if self._inline:
                    df = self.opener(self.indexfile)
                else:
                    df = self.opener(self.datafile)
            df.seek(start)
            self._chunkcache = (start, df.read(cache_length))

        start, length = self.start(rev), self.length(rev)
        if self._inline:
            start += (rev + 1) * self._io.size
        end = start + length

        offset = 0
        if not self._chunkcache:
            cache_length = max(65536, length)
            loadcache(df)
        else:
            cache_start = self._chunkcache[0]
            cache_length = len(self._chunkcache[1])
            cache_end = cache_start + cache_length
            if start >= cache_start and end <= cache_end:
                # it is cached
                offset = start - cache_start
            else:
                cache_length = max(65536, length)
                loadcache(df)

        # avoid copying large chunks
        c = self._chunkcache[1]
        if cache_length != length:
            c = c[offset:offset + length]

        return decompress(c)

    def delta(self, node):
        """return or calculate a delta between a node and its predecessor"""
        r = self.rev(node)
        return self.revdiff(r - 1, r)

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
            return self.chunk(rev2)

        return mdiff.textdiff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid:
            return ""
        if self._cache and self._cache[0] == node:
            return str(self._cache[2])

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        # check rev flags
        if self.index[rev][0] & 0xFFFF:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.index[rev][0] & 0xFFFF))

        df = None

        # do we have useful data cached?
        if self._cache and self._cache[1] >= base and self._cache[1] < rev:
            base = self._cache[1]
            text = str(self._cache[2])
            self._loadindex(base, rev + 1)
            if not self._inline and rev > base + 1:
                df = self.opener(self.datafile)
        else:
            self._loadindex(base, rev + 1)
            if not self._inline and rev > base:
                df = self.opener(self.datafile)
            text = self.chunk(base, df=df)

        bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
        text = mdiff.patches(text, bins)
        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self._cache = (node, rev, text)
        return text

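revision() reads the full text stored at the delta-chain base and replays every delta up to the requested revision. A standalone sketch of that walk; `apply_patch` is a hypothetical stand-in for mdiff.patches, passed in so the sketch stays self-contained:

    def reconstruct(fulltext, deltas, apply_patch):
        # fulltext is the snapshot stored at base(rev); deltas are the
        # chunks for revisions base+1 .. rev, applied in order.
        text = fulltext
        for d in deltas:
            text = apply_patch(text, d)
        return text
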
    def checkinlinesize(self, tr, fp=None):
        if not self._inline:
            return
        if not fp:
            fp = self.opener(self.indexfile, 'r')
            fp.seek(0, 2)
        size = fp.tell()
        if size < 131072:
            return
        trinfo = tr.find(self.indexfile)
        if trinfo == None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)
        df = self.opener(self.datafile, 'w')
        try:
            calc = self._io.size
            for r in self:
                start = self.start(r) + (r + 1) * calc
                length = self.length(r)
                fp.seek(start)
                d = fp.read(length)
                df.write(d)
        finally:
            df.close()

        fp.close()
        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * calc)
        self._chunkcache = None

    def addrevision(self, text, transaction, link, p1, p2, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a+")
        try:
            return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
        node = hash(text, p1, p2)
        if node in self.nodemap:
            return node

        curr = len(self)
        prev = curr - 1
        base = self.base(prev)
        offset = self.end(prev)

        if curr:
            if not d:
                ptext = self.revision(self.node(prev))
                d = mdiff.textdiff(ptext, text)
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = l + offset - self.start(base)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not curr or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = curr

        e = (offset_type(offset, 0), l, len(text),
             base, link, self.rev(p1), self.rev(p2), node)
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        self._cache = (node, curr, text)
        return node

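The snapshot heuristic above keeps delta chains from growing without bound: once the bytes needed to reconstruct a revision exceed twice its uncompressed size, a full version is stored instead of another delta. A minimal restatement of that test (hypothetical names):

    def wants_fullversion(chaindist, textlen):
        # chaindist: bytes from the chain base through the new delta
        # textlen:   length of the uncompressed revision text
        return chaindist > textlen * 2
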
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        def parents(rev):
            return [p for p in self.parentrevs(rev) if p != nullrev]

        c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
        if c is None:
            return nullid

        return self.node(c)

    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
        have this parent, as it has all history before these
        changesets. parent is parent[0].
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            if a == -1:
                d = self.revision(nb)
                meta += mdiff.trivialdiffheader(len(d))
            else:
                d = self.revdiff(a, b)
            yield changegroup.chunkheader(len(meta) + len(d))
            yield meta
            if len(d) > 2**20:
                pos = 0
                while pos < len(d):
                    pos2 = pos + 2 ** 18
                    yield d[pos:pos2]
                    pos = pos2
            else:
                yield d

        yield changegroup.closechunk()

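The stream produced above is a sequence of length-prefixed chunks. A hedged sketch of the framing, assuming changegroup's usual 4-byte big-endian length that counts itself, with a zero length terminating the stream:

    import struct

    def chunkheader(datalen):
        # the length prefix includes the 4 header bytes themselves
        return struct.pack(">l", datalen + 4)

    def closechunk():
        # a zero-length chunk marks the end of the group
        return struct.pack(">l", 0)
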
    def addgroup(self, revs, linkmapper, transaction):
        """
        add a delta group

        Given a set of deltas, add them to the revision log. The
        first delta is against its parent, which should be in our
        log; the rest are against the previous delta.
        """

        # track the base of the current delta log
        r = len(self)
        t = r - 1
        node = None

        base = prev = nullrev
        start = end = textlen = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        try:
            # loop through our set of deltas
            chain = None
            for chunk in revs:
                node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue
                delta = buffer(chunk, 80)
                del chunk

                for p in (p1, p2):
                    if not p in self.nodemap:
                        raise LookupError(p, self.indexfile, _('unknown parent'))

                if not chain:
                    # retrieve the parent revision of the delta chain
                    chain = p1
                    if not chain in self.nodemap:
                        raise LookupError(chain, self.indexfile, _('unknown base'))

                # full versions are inserted when the needed deltas become
                # comparable to the uncompressed text or when the previous
                # version is not the one we have a delta against. We use
                # the size of the previous full rev as a proxy for the
                # current size.

                if chain == prev:
                    cdelta = compress(delta)
                    cdeltalen = len(cdelta[0]) + len(cdelta[1])
                    textlen = mdiff.patchedsize(textlen, delta)

                if chain != prev or (end - start + cdeltalen) > textlen * 2:
                    # flush our writes here so we can read it in revision
                    if dfh:
                        dfh.flush()
                    ifh.flush()
                    text = self.revision(chain)
                    if len(text) == 0:
                        # skip over trivial delta header
                        text = buffer(delta, 12)
                    else:
                        text = mdiff.patches(text, [delta])
                    del delta
                    chk = self._addrevision(text, transaction, link, p1, p2, None,
                                            ifh, dfh)
                    if not dfh and not self._inline:
                        # addrevision switched from inline to conventional
                        # reopen the index
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                    if chk != node:
                        raise RevlogError(_("consistency error adding group"))
                    textlen = len(text)
                else:
                    e = (offset_type(end, 0), cdeltalen, textlen, base,
                         link, self.rev(p1), self.rev(p2), node)
                    self.index.insert(-1, e)
                    self.nodemap[node] = r
                    entry = self._io.packentry(e, self.node, self.version, r)
                    if self._inline:
                        ifh.write(entry)
                        ifh.write(cdelta[0])
                        ifh.write(cdelta[1])
                        self.checkinlinesize(transaction, ifh)
                        if not self._inline:
                            dfh = self.opener(self.datafile, "a")
                            ifh = self.opener(self.indexfile, "a")
                    else:
                        dfh.write(cdelta[0])
                        dfh.write(cdelta[1])
                        ifh.write(entry)

                t, r, chain, prev = r, r + 1, node, node
                base = self.base(t)
                start = self.start(base)
                end = self.end(t)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return node

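Each incoming chunk consumed above begins with an 80-byte header of four 20-byte nodes, followed by the delta payload. A small parsing sketch mirroring the struct.unpack call in the loop (the function name is hypothetical):

    import struct

    def parsechunk(chunk):
        # node, first parent, second parent, linked changeset node
        node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
        delta = chunk[80:]  # the code above uses buffer(chunk, 80)
        return node, p1, p2, cs, delta
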
    def strip(self, minlink):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll readd them after this truncation.
        """
        if len(self) == 0:
            return

        if isinstance(self.index, lazyindex):
            self._loadindexmap()

        for rev in self:
            if self.index[rev][4] >= minlink:
                break
        else:
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            df = self.opener(self.datafile, "a")
            df.truncate(end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        indexf = self.opener(self.indexfile, "a")
        indexf.truncate(end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkcache = None
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = self._io.size
            i = max(0, actual / s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

    def files(self):
        res = [ self.indexfile ]
        if not self._inline:
            res.append(self.datafile)
        return res
@@ -1,5 +1,5 b''
 % initialize repository
 adding a
 % try hgweb request
 0
-54086fe9a47b47d83204f38bda0b90c2 page1
+1f424bb22ec05c3c6bc866b6e67efe43 page1
@@ -1,180 +1,180 b''
 % test fetch with default branches only
 adding a
 updating working directory
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 updating working directory
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 adding b
 1:97d72e5f12c7
 % should pull one change
 pulling from ../a
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 1 changesets with 1 changes to 1 files
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 1:97d72e5f12c7
 adding c
 updating working directory
 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
 updating working directory
 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
 % should merge c into a
 pulling from ../a
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 1 changesets with 1 changes to 1 files (+1 heads)
 updating to 2:97d72e5f12c7
 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
 merging with 1:5e056962225c
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 new changeset 3:cd3a41621cf0 merges remote changes with local
 a
 b
 c
 % fetch over http, no auth
 pulling from http://localhost:20059/
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 1 changesets with 1 changes to 1 files (+1 heads)
 updating to 2:97d72e5f12c7
 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
 merging with 1:5e056962225c
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 new changeset 3:... merges remote changes with local
 Automated merge with http://localhost:20059/
 % fetch over http with auth (should be hidden in desc)
 pulling from http://user:***@localhost:20059/
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 1 changesets with 1 changes to 1 files (+1 heads)
 updating to 2:97d72e5f12c7
 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
 merging with 1:5e056962225c
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 new changeset 3:... merges remote changes with local
 Automated merge with http://localhost:20059/
 updating working directory
 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
 updating working directory
 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
 adding f
 adding g
 % should merge f into g
 pulling from ../f
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 1 changesets with 1 changes to 1 files (+1 heads)
 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
 merging with 3:cc6a3744834d
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 new changeset 4:55aa4f32ec59 merges remote changes with local
 % should abort, because i is modified
 abort: working directory is missing some files
 % test fetch with named branches
 adding a
 marked working directory as branch a
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 marked working directory as branch b
 adding b
 created new head

 % pull in change on foreign branch
 updating working directory
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 updating working directory
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 pulling from n1
 searching for changes
 adding changesets
 adding manifests
 adding file changes
-added 1 changesets with 1 changes to 1 files
+added 1 changesets with 1 changes to 2 files
 % parent should be 2 (no automatic update)
 2

 % pull in changes on both foreign and local branches
 updating working directory
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 updating working directory
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 pulling from n1
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 2 changesets with 2 changes to 2 files
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 % parent should be 4 (fast forward)
 4

 % pull changes on foreign (2 new heads) and local (1 new head) branches
 % with a local change
 updating working directory
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 updating working directory
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
 created new head
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 adding c
 pulling from n1
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 3 changesets with 3 changes to 2 files (+2 heads)
 updating to 5:708c6cce3d26
 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
 merging with 3:d83427717b1f
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 new changeset 7:48f1a33f52af merges remote changes with local
 % parent should be 7 (new merge changeset)
 7
 % pull in changes on foreign (merge of local branch) and local (2 new
 % heads) with a local change
 updating working directory
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 updating working directory
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 (branch merge, don't forget to commit)
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 created new head
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 created new head
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 pulling from n1
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 3 changesets with 2 changes to 1 files (+2 heads)
 not merging with 1 other new branch heads (use "hg heads ." and "hg merge" to merge them)
 % parent should be 3 (fetch did not merge anything)
 3
 % pull in change on different branch than dirstate
 adding a
 updating working directory
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 marked working directory as branch topic
 abort: working dir not at branch tip (use "hg update" to check out branch tip)
 % parent should be 0 (fetch did not update or merge anything)
 0
NO CONTENT: modified file, binary diff hidden
@@ -1,82 +1,82 b''
 updating working directory
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 pushing to ../a
 searching for changes
 abort: push creates new remote heads!
 (did you forget to merge? use push -f to force)
 pulling from ../a
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 1 changesets with 1 changes to 1 files (+1 heads)
 (run 'hg heads' to see heads, 'hg merge' to merge)
 pushing to ../a
 searching for changes
 abort: push creates new remote heads!
 (did you forget to merge? use push -f to force)
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 (branch merge, don't forget to commit)
 pushing to ../a
 searching for changes
 adding changesets
 adding manifests
 adding file changes
-added 2 changesets with 1 changes to 1 files
+added 2 changesets with 1 changes to 2 files
 adding foo
 updating working directory
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 created new head
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 created new head
 merging foo
 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
 (branch merge, don't forget to commit)
 pushing to ../c
 searching for changes
 abort: push creates new remote heads!
 (did you forget to merge? use push -f to force)
 1
 pushing to ../c
 searching for changes
 no changes found
 0
 pushing to ../c
 searching for changes
 abort: push creates new remote heads!
 (did you forget to merge? use push -f to force)
 1
 pushing to ../c
 searching for changes
 abort: push creates new remote heads!
 (did you forget to merge? use push -f to force)
 1
 pushing to ../c
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 2 changesets with 2 changes to 1 files (+2 heads)
 0
 pushing to ../c
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 1 changesets with 1 changes to 1 files (-1 heads)
 0
 pushing to ../e
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 1 changesets with 1 changes to 1 files
 0
 pushing to ../e
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 1 changesets with 1 changes to 1 files
 0