changegroup: avoid large copies...

Matt Mackall
r5368:61462e7d default
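The patch below replaces genchunk(), which built every framed chunk as a
fresh header-plus-payload string, with chunkheader() and a write loop that
feeds the payload to the compressor in 1 MB slices. A minimal sketch of the
difference, outside the patch (the two function names here are hypothetical):

    import struct

    def write_chunk_copying(fh, z, chunk):
        # old approach: concatenating header and payload duplicates the
        # whole chunk in memory before it ever reaches the compressor
        fh.write(z.compress(struct.pack(">l", len(chunk) + 4) + chunk))

    def write_chunk_streaming(fh, z, chunk, blocksize=2**20):
        # new approach: emit the 4-byte header, then hand the payload to
        # the compressor in bounded slices; temporary strings never exceed
        # blocksize, no matter how large the chunk is
        fh.write(z.compress(struct.pack(">l", len(chunk) + 4)))
        pos = 0
        while pos < len(chunk):
            fh.write(z.compress(chunk[pos:pos + blocksize]))
            pos += blocksize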
mercurial/changegroup.py
@@ -1,118 +1,122 @@
 """
 changegroup.py - Mercurial changegroup manipulation functions

 Copyright 2006 Matt Mackall <mpm@selenic.com>

 This software may be used and distributed according to the terms
 of the GNU General Public License, incorporated herein by reference.
 """

 from i18n import _
 import struct, os, bz2, zlib, util, tempfile

 def getchunk(source):
     """get a chunk from a changegroup"""
     d = source.read(4)
     if not d:
         return ""
     l = struct.unpack(">l", d)[0]
     if l <= 4:
         return ""
     d = source.read(l - 4)
     if len(d) < l - 4:
         raise util.Abort(_("premature EOF reading chunk"
                            " (got %d bytes, expected %d)")
                          % (len(d), l - 4))
     return d

 def chunkiter(source):
     """iterate through the chunks in source"""
     while 1:
         c = getchunk(source)
         if not c:
             break
         yield c

-def genchunk(data):
-    """build a changegroup chunk"""
-    header = struct.pack(">l", len(data)+ 4)
-    return "%s%s" % (header, data)
+def chunkheader(length):
+    """build a changegroup chunk header"""
+    return struct.pack(">l", length + 4)

 def closechunk():
     return struct.pack(">l", 0)

 class nocompress(object):
     def compress(self, x):
         return x
     def flush(self):
         return ""

 bundletypes = {
     "": ("", nocompress),
     "HG10UN": ("HG10UN", nocompress),
     "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
     "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
 }

 def writebundle(cg, filename, bundletype):
     """Write a bundle file and return its filename.

     Existing files will not be overwritten.
     If no filename is specified, a temporary file is created.
     bz2 compression can be turned off.
     The bundle file will be deleted in case of errors.
     """

     fh = None
     cleanup = None
     try:
         if filename:
             fh = open(filename, "wb")
         else:
             fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
             fh = os.fdopen(fd, "wb")
         cleanup = filename

         header, compressor = bundletypes[bundletype]
         fh.write(header)
         z = compressor()

         # parse the changegroup data, otherwise we will block
         # in case of sshrepo because we don't know the end of the stream

         # an empty chunkiter is the end of the changegroup
         empty = False
         while not empty:
             empty = True
             for chunk in chunkiter(cg):
                 empty = False
-                fh.write(z.compress(genchunk(chunk)))
+                fh.write(z.compress(chunkheader(len(chunk))))
+                pos = 0
+                while pos < len(chunk):
+                    next = pos + 2**20
+                    fh.write(z.compress(chunk[pos:next]))
+                    pos = next
             fh.write(z.compress(closechunk()))
         fh.write(z.flush())
         cleanup = None
         return filename
     finally:
         if fh is not None:
             fh.close()
         if cleanup is not None:
             os.unlink(cleanup)

 def readbundle(fh, fname):
     header = fh.read(6)
     if not header.startswith("HG"):
         raise util.Abort(_("%s: not a Mercurial bundle file") % fname)
     elif not header.startswith("HG10"):
         raise util.Abort(_("%s: unknown bundle version") % fname)

     if header == "HG10BZ":
         def generator(f):
             zd = bz2.BZ2Decompressor()
             zd.decompress("BZ")
             for chunk in util.filechunkiter(f, 4096):
                 yield zd.decompress(chunk)
         return util.chunkbuffer(generator(fh))
     elif header == "HG10UN":
         return fh

     raise util.Abort(_("%s: unknown bundle compression type")
                      % fname)
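For reference, the framing that getchunk()/chunkiter() parse and
chunkheader()/closechunk() emit is: a big-endian 4-byte length that counts
itself, followed by the payload, with a zero-length record terminating the
stream. A self-contained round-trip in the same Python 2 idiom as the code
above (a sketch, independent of the Mercurial modules):

    import struct
    from StringIO import StringIO

    def frame(chunks):
        out = []
        for c in chunks:
            out.append(struct.pack(">l", len(c) + 4))  # chunkheader()
            out.append(c)
        out.append(struct.pack(">l", 0))               # closechunk()
        return "".join(out)

    source = StringIO(frame(["hello", "world"]))
    while True:
        l = struct.unpack(">l", source.read(4))[0]     # getchunk() protocol
        if l <= 4:
            break
        print source.read(l - 4)                       # prints hello, world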
@@ -1,1990 +1,1992 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import re, lock, transaction, tempfile, stat, errno, ui
12 import re, lock, transaction, tempfile, stat, errno, ui
13 import os, revlog, time, util, extensions, hook
13 import os, revlog, time, util, extensions, hook
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = util.set(('lookup', 'changegroupsubset'))
16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __init__(self, parentui, path=None, create=0):
19 def __init__(self, parentui, path=None, create=0):
20 repo.repository.__init__(self)
20 repo.repository.__init__(self)
21 self.root = os.path.realpath(path)
21 self.root = os.path.realpath(path)
22 self.path = os.path.join(self.root, ".hg")
22 self.path = os.path.join(self.root, ".hg")
23 self.origroot = path
23 self.origroot = path
24 self.opener = util.opener(self.path)
24 self.opener = util.opener(self.path)
25 self.wopener = util.opener(self.root)
25 self.wopener = util.opener(self.root)
26
26
27 if not os.path.isdir(self.path):
27 if not os.path.isdir(self.path):
28 if create:
28 if create:
29 if not os.path.exists(path):
29 if not os.path.exists(path):
30 os.mkdir(path)
30 os.mkdir(path)
31 os.mkdir(self.path)
31 os.mkdir(self.path)
32 requirements = ["revlogv1"]
32 requirements = ["revlogv1"]
33 if parentui.configbool('format', 'usestore', True):
33 if parentui.configbool('format', 'usestore', True):
34 os.mkdir(os.path.join(self.path, "store"))
34 os.mkdir(os.path.join(self.path, "store"))
35 requirements.append("store")
35 requirements.append("store")
36 # create an invalid changelog
36 # create an invalid changelog
37 self.opener("00changelog.i", "a").write(
37 self.opener("00changelog.i", "a").write(
38 '\0\0\0\2' # represents revlogv2
38 '\0\0\0\2' # represents revlogv2
39 ' dummy changelog to prevent using the old repo layout'
39 ' dummy changelog to prevent using the old repo layout'
40 )
40 )
41 reqfile = self.opener("requires", "w")
41 reqfile = self.opener("requires", "w")
42 for r in requirements:
42 for r in requirements:
43 reqfile.write("%s\n" % r)
43 reqfile.write("%s\n" % r)
44 reqfile.close()
44 reqfile.close()
45 else:
45 else:
46 raise repo.RepoError(_("repository %s not found") % path)
46 raise repo.RepoError(_("repository %s not found") % path)
47 elif create:
47 elif create:
48 raise repo.RepoError(_("repository %s already exists") % path)
48 raise repo.RepoError(_("repository %s already exists") % path)
49 else:
49 else:
50 # find requirements
50 # find requirements
51 try:
51 try:
52 requirements = self.opener("requires").read().splitlines()
52 requirements = self.opener("requires").read().splitlines()
53 except IOError, inst:
53 except IOError, inst:
54 if inst.errno != errno.ENOENT:
54 if inst.errno != errno.ENOENT:
55 raise
55 raise
56 requirements = []
56 requirements = []
57 # check them
57 # check them
58 for r in requirements:
58 for r in requirements:
59 if r not in self.supported:
59 if r not in self.supported:
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61
61
62 # setup store
62 # setup store
63 if "store" in requirements:
63 if "store" in requirements:
64 self.encodefn = util.encodefilename
64 self.encodefn = util.encodefilename
65 self.decodefn = util.decodefilename
65 self.decodefn = util.decodefilename
66 self.spath = os.path.join(self.path, "store")
66 self.spath = os.path.join(self.path, "store")
67 else:
67 else:
68 self.encodefn = lambda x: x
68 self.encodefn = lambda x: x
69 self.decodefn = lambda x: x
69 self.decodefn = lambda x: x
70 self.spath = self.path
70 self.spath = self.path
71 self.sopener = util.encodedopener(util.opener(self.spath),
71 self.sopener = util.encodedopener(util.opener(self.spath),
72 self.encodefn)
72 self.encodefn)
73
73
74 self.ui = ui.ui(parentui=parentui)
74 self.ui = ui.ui(parentui=parentui)
75 try:
75 try:
76 self.ui.readconfig(self.join("hgrc"), self.root)
76 self.ui.readconfig(self.join("hgrc"), self.root)
77 extensions.loadall(self.ui)
77 extensions.loadall(self.ui)
78 except IOError:
78 except IOError:
79 pass
79 pass
80
80
81 self.tagscache = None
81 self.tagscache = None
82 self.branchcache = None
82 self.branchcache = None
83 self.nodetagscache = None
83 self.nodetagscache = None
84 self.filterpats = {}
84 self.filterpats = {}
85 self._transref = self._lockref = self._wlockref = None
85 self._transref = self._lockref = self._wlockref = None
86
86
87 def __getattr__(self, name):
87 def __getattr__(self, name):
88 if name == 'changelog':
88 if name == 'changelog':
89 self.changelog = changelog.changelog(self.sopener)
89 self.changelog = changelog.changelog(self.sopener)
90 self.sopener.defversion = self.changelog.version
90 self.sopener.defversion = self.changelog.version
91 return self.changelog
91 return self.changelog
92 if name == 'manifest':
92 if name == 'manifest':
93 self.changelog
93 self.changelog
94 self.manifest = manifest.manifest(self.sopener)
94 self.manifest = manifest.manifest(self.sopener)
95 return self.manifest
95 return self.manifest
96 if name == 'dirstate':
96 if name == 'dirstate':
97 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
97 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
98 return self.dirstate
98 return self.dirstate
99 else:
99 else:
100 raise AttributeError, name
100 raise AttributeError, name
101
101
102 def url(self):
102 def url(self):
103 return 'file:' + self.root
103 return 'file:' + self.root
104
104
105 def hook(self, name, throw=False, **args):
105 def hook(self, name, throw=False, **args):
106 return hook.hook(self.ui, self, name, throw, **args)
106 return hook.hook(self.ui, self, name, throw, **args)
107
107
108 tag_disallowed = ':\r\n'
108 tag_disallowed = ':\r\n'
109
109
110 def _tag(self, name, node, message, local, user, date, parent=None,
110 def _tag(self, name, node, message, local, user, date, parent=None,
111 extra={}):
111 extra={}):
112 use_dirstate = parent is None
112 use_dirstate = parent is None
113
113
114 for c in self.tag_disallowed:
114 for c in self.tag_disallowed:
115 if c in name:
115 if c in name:
116 raise util.Abort(_('%r cannot be used in a tag name') % c)
116 raise util.Abort(_('%r cannot be used in a tag name') % c)
117
117
118 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
118 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
119
119
120 def writetag(fp, name, munge, prevtags):
120 def writetag(fp, name, munge, prevtags):
121 if prevtags and prevtags[-1] != '\n':
121 if prevtags and prevtags[-1] != '\n':
122 fp.write('\n')
122 fp.write('\n')
123 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
123 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
124 fp.close()
124 fp.close()
125 self.hook('tag', node=hex(node), tag=name, local=local)
125 self.hook('tag', node=hex(node), tag=name, local=local)
126
126
127 prevtags = ''
127 prevtags = ''
128 if local:
128 if local:
129 try:
129 try:
130 fp = self.opener('localtags', 'r+')
130 fp = self.opener('localtags', 'r+')
131 except IOError, err:
131 except IOError, err:
132 fp = self.opener('localtags', 'a')
132 fp = self.opener('localtags', 'a')
133 else:
133 else:
134 prevtags = fp.read()
134 prevtags = fp.read()
135
135
136 # local tags are stored in the current charset
136 # local tags are stored in the current charset
137 writetag(fp, name, None, prevtags)
137 writetag(fp, name, None, prevtags)
138 return
138 return
139
139
140 if use_dirstate:
140 if use_dirstate:
141 try:
141 try:
142 fp = self.wfile('.hgtags', 'rb+')
142 fp = self.wfile('.hgtags', 'rb+')
143 except IOError, err:
143 except IOError, err:
144 fp = self.wfile('.hgtags', 'ab')
144 fp = self.wfile('.hgtags', 'ab')
145 else:
145 else:
146 prevtags = fp.read()
146 prevtags = fp.read()
147 else:
147 else:
148 try:
148 try:
149 prevtags = self.filectx('.hgtags', parent).data()
149 prevtags = self.filectx('.hgtags', parent).data()
150 except revlog.LookupError:
150 except revlog.LookupError:
151 pass
151 pass
152 fp = self.wfile('.hgtags', 'wb')
152 fp = self.wfile('.hgtags', 'wb')
153 if prevtags:
153 if prevtags:
154 fp.write(prevtags)
154 fp.write(prevtags)
155
155
156 # committed tags are stored in UTF-8
156 # committed tags are stored in UTF-8
157 writetag(fp, name, util.fromlocal, prevtags)
157 writetag(fp, name, util.fromlocal, prevtags)
158
158
159 if use_dirstate and '.hgtags' not in self.dirstate:
159 if use_dirstate and '.hgtags' not in self.dirstate:
160 self.add(['.hgtags'])
160 self.add(['.hgtags'])
161
161
162 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
162 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
163 extra=extra)
163 extra=extra)
164
164
165 self.hook('tag', node=hex(node), tag=name, local=local)
165 self.hook('tag', node=hex(node), tag=name, local=local)
166
166
167 return tagnode
167 return tagnode
168
168
169 def tag(self, name, node, message, local, user, date):
169 def tag(self, name, node, message, local, user, date):
170 '''tag a revision with a symbolic name.
170 '''tag a revision with a symbolic name.
171
171
172 if local is True, the tag is stored in a per-repository file.
172 if local is True, the tag is stored in a per-repository file.
173 otherwise, it is stored in the .hgtags file, and a new
173 otherwise, it is stored in the .hgtags file, and a new
174 changeset is committed with the change.
174 changeset is committed with the change.
175
175
176 keyword arguments:
176 keyword arguments:
177
177
178 local: whether to store tag in non-version-controlled file
178 local: whether to store tag in non-version-controlled file
179 (default False)
179 (default False)
180
180
181 message: commit message to use if committing
181 message: commit message to use if committing
182
182
183 user: name of user to use if committing
183 user: name of user to use if committing
184
184
185 date: date tuple to use if committing'''
185 date: date tuple to use if committing'''
186
186
187 for x in self.status()[:5]:
187 for x in self.status()[:5]:
188 if '.hgtags' in x:
188 if '.hgtags' in x:
189 raise util.Abort(_('working copy of .hgtags is changed '
189 raise util.Abort(_('working copy of .hgtags is changed '
190 '(please commit .hgtags manually)'))
190 '(please commit .hgtags manually)'))
191
191
192
192
193 self._tag(name, node, message, local, user, date)
193 self._tag(name, node, message, local, user, date)
194
194
195 def tags(self):
195 def tags(self):
196 '''return a mapping of tag to node'''
196 '''return a mapping of tag to node'''
197 if self.tagscache:
197 if self.tagscache:
198 return self.tagscache
198 return self.tagscache
199
199
200 globaltags = {}
200 globaltags = {}
201
201
202 def readtags(lines, fn):
202 def readtags(lines, fn):
203 filetags = {}
203 filetags = {}
204 count = 0
204 count = 0
205
205
206 def warn(msg):
206 def warn(msg):
207 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
207 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
208
208
209 for l in lines:
209 for l in lines:
210 count += 1
210 count += 1
211 if not l:
211 if not l:
212 continue
212 continue
213 s = l.split(" ", 1)
213 s = l.split(" ", 1)
214 if len(s) != 2:
214 if len(s) != 2:
215 warn(_("cannot parse entry"))
215 warn(_("cannot parse entry"))
216 continue
216 continue
217 node, key = s
217 node, key = s
218 key = util.tolocal(key.strip()) # stored in UTF-8
218 key = util.tolocal(key.strip()) # stored in UTF-8
219 try:
219 try:
220 bin_n = bin(node)
220 bin_n = bin(node)
221 except TypeError:
221 except TypeError:
222 warn(_("node '%s' is not well formed") % node)
222 warn(_("node '%s' is not well formed") % node)
223 continue
223 continue
224 if bin_n not in self.changelog.nodemap:
224 if bin_n not in self.changelog.nodemap:
225 warn(_("tag '%s' refers to unknown node") % key)
225 warn(_("tag '%s' refers to unknown node") % key)
226 continue
226 continue
227
227
228 h = []
228 h = []
229 if key in filetags:
229 if key in filetags:
230 n, h = filetags[key]
230 n, h = filetags[key]
231 h.append(n)
231 h.append(n)
232 filetags[key] = (bin_n, h)
232 filetags[key] = (bin_n, h)
233
233
234 for k, nh in filetags.items():
234 for k, nh in filetags.items():
235 if k not in globaltags:
235 if k not in globaltags:
236 globaltags[k] = nh
236 globaltags[k] = nh
237 continue
237 continue
238 # we prefer the global tag if:
238 # we prefer the global tag if:
239 # it supercedes us OR
239 # it supercedes us OR
240 # mutual supercedes and it has a higher rank
240 # mutual supercedes and it has a higher rank
241 # otherwise we win because we're tip-most
241 # otherwise we win because we're tip-most
242 an, ah = nh
242 an, ah = nh
243 bn, bh = globaltags[k]
243 bn, bh = globaltags[k]
244 if (bn != an and an in bh and
244 if (bn != an and an in bh and
245 (bn not in ah or len(bh) > len(ah))):
245 (bn not in ah or len(bh) > len(ah))):
246 an = bn
246 an = bn
247 ah.extend([n for n in bh if n not in ah])
247 ah.extend([n for n in bh if n not in ah])
248 globaltags[k] = an, ah
248 globaltags[k] = an, ah
249
249
250 # read the tags file from each head, ending with the tip
250 # read the tags file from each head, ending with the tip
251 f = None
251 f = None
252 for rev, node, fnode in self._hgtagsnodes():
252 for rev, node, fnode in self._hgtagsnodes():
253 f = (f and f.filectx(fnode) or
253 f = (f and f.filectx(fnode) or
254 self.filectx('.hgtags', fileid=fnode))
254 self.filectx('.hgtags', fileid=fnode))
255 readtags(f.data().splitlines(), f)
255 readtags(f.data().splitlines(), f)
256
256
257 try:
257 try:
258 data = util.fromlocal(self.opener("localtags").read())
258 data = util.fromlocal(self.opener("localtags").read())
259 # localtags are stored in the local character set
259 # localtags are stored in the local character set
260 # while the internal tag table is stored in UTF-8
260 # while the internal tag table is stored in UTF-8
261 readtags(data.splitlines(), "localtags")
261 readtags(data.splitlines(), "localtags")
262 except IOError:
262 except IOError:
263 pass
263 pass
264
264
265 self.tagscache = {}
265 self.tagscache = {}
266 for k,nh in globaltags.items():
266 for k,nh in globaltags.items():
267 n = nh[0]
267 n = nh[0]
268 if n != nullid:
268 if n != nullid:
269 self.tagscache[k] = n
269 self.tagscache[k] = n
270 self.tagscache['tip'] = self.changelog.tip()
270 self.tagscache['tip'] = self.changelog.tip()
271
271
272 return self.tagscache
272 return self.tagscache
273
273
274 def _hgtagsnodes(self):
274 def _hgtagsnodes(self):
275 heads = self.heads()
275 heads = self.heads()
276 heads.reverse()
276 heads.reverse()
277 last = {}
277 last = {}
278 ret = []
278 ret = []
279 for node in heads:
279 for node in heads:
280 c = self.changectx(node)
280 c = self.changectx(node)
281 rev = c.rev()
281 rev = c.rev()
282 try:
282 try:
283 fnode = c.filenode('.hgtags')
283 fnode = c.filenode('.hgtags')
284 except revlog.LookupError:
284 except revlog.LookupError:
285 continue
285 continue
286 ret.append((rev, node, fnode))
286 ret.append((rev, node, fnode))
287 if fnode in last:
287 if fnode in last:
288 ret[last[fnode]] = None
288 ret[last[fnode]] = None
289 last[fnode] = len(ret) - 1
289 last[fnode] = len(ret) - 1
290 return [item for item in ret if item]
290 return [item for item in ret if item]
291
291
292 def tagslist(self):
292 def tagslist(self):
293 '''return a list of tags ordered by revision'''
293 '''return a list of tags ordered by revision'''
294 l = []
294 l = []
295 for t, n in self.tags().items():
295 for t, n in self.tags().items():
296 try:
296 try:
297 r = self.changelog.rev(n)
297 r = self.changelog.rev(n)
298 except:
298 except:
299 r = -2 # sort to the beginning of the list if unknown
299 r = -2 # sort to the beginning of the list if unknown
300 l.append((r, t, n))
300 l.append((r, t, n))
301 l.sort()
301 l.sort()
302 return [(t, n) for r, t, n in l]
302 return [(t, n) for r, t, n in l]
303
303
304 def nodetags(self, node):
304 def nodetags(self, node):
305 '''return the tags associated with a node'''
305 '''return the tags associated with a node'''
306 if not self.nodetagscache:
306 if not self.nodetagscache:
307 self.nodetagscache = {}
307 self.nodetagscache = {}
308 for t, n in self.tags().items():
308 for t, n in self.tags().items():
309 self.nodetagscache.setdefault(n, []).append(t)
309 self.nodetagscache.setdefault(n, []).append(t)
310 return self.nodetagscache.get(node, [])
310 return self.nodetagscache.get(node, [])
311
311
312 def _branchtags(self):
312 def _branchtags(self):
313 partial, last, lrev = self._readbranchcache()
313 partial, last, lrev = self._readbranchcache()
314
314
315 tiprev = self.changelog.count() - 1
315 tiprev = self.changelog.count() - 1
316 if lrev != tiprev:
316 if lrev != tiprev:
317 self._updatebranchcache(partial, lrev+1, tiprev+1)
317 self._updatebranchcache(partial, lrev+1, tiprev+1)
318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
319
319
320 return partial
320 return partial
321
321
322 def branchtags(self):
322 def branchtags(self):
323 if self.branchcache is not None:
323 if self.branchcache is not None:
324 return self.branchcache
324 return self.branchcache
325
325
326 self.branchcache = {} # avoid recursion in changectx
326 self.branchcache = {} # avoid recursion in changectx
327 partial = self._branchtags()
327 partial = self._branchtags()
328
328
329 # the branch cache is stored on disk as UTF-8, but in the local
329 # the branch cache is stored on disk as UTF-8, but in the local
330 # charset internally
330 # charset internally
331 for k, v in partial.items():
331 for k, v in partial.items():
332 self.branchcache[util.tolocal(k)] = v
332 self.branchcache[util.tolocal(k)] = v
333 return self.branchcache
333 return self.branchcache
334
334
335 def _readbranchcache(self):
335 def _readbranchcache(self):
336 partial = {}
336 partial = {}
337 try:
337 try:
338 f = self.opener("branch.cache")
338 f = self.opener("branch.cache")
339 lines = f.read().split('\n')
339 lines = f.read().split('\n')
340 f.close()
340 f.close()
341 except (IOError, OSError):
341 except (IOError, OSError):
342 return {}, nullid, nullrev
342 return {}, nullid, nullrev
343
343
344 try:
344 try:
345 last, lrev = lines.pop(0).split(" ", 1)
345 last, lrev = lines.pop(0).split(" ", 1)
346 last, lrev = bin(last), int(lrev)
346 last, lrev = bin(last), int(lrev)
347 if not (lrev < self.changelog.count() and
347 if not (lrev < self.changelog.count() and
348 self.changelog.node(lrev) == last): # sanity check
348 self.changelog.node(lrev) == last): # sanity check
349 # invalidate the cache
349 # invalidate the cache
350 raise ValueError('Invalid branch cache: unknown tip')
350 raise ValueError('Invalid branch cache: unknown tip')
351 for l in lines:
351 for l in lines:
352 if not l: continue
352 if not l: continue
353 node, label = l.split(" ", 1)
353 node, label = l.split(" ", 1)
354 partial[label.strip()] = bin(node)
354 partial[label.strip()] = bin(node)
355 except (KeyboardInterrupt, util.SignalInterrupt):
355 except (KeyboardInterrupt, util.SignalInterrupt):
356 raise
356 raise
357 except Exception, inst:
357 except Exception, inst:
358 if self.ui.debugflag:
358 if self.ui.debugflag:
359 self.ui.warn(str(inst), '\n')
359 self.ui.warn(str(inst), '\n')
360 partial, last, lrev = {}, nullid, nullrev
360 partial, last, lrev = {}, nullid, nullrev
361 return partial, last, lrev
361 return partial, last, lrev
362
362
363 def _writebranchcache(self, branches, tip, tiprev):
363 def _writebranchcache(self, branches, tip, tiprev):
364 try:
364 try:
365 f = self.opener("branch.cache", "w", atomictemp=True)
365 f = self.opener("branch.cache", "w", atomictemp=True)
366 f.write("%s %s\n" % (hex(tip), tiprev))
366 f.write("%s %s\n" % (hex(tip), tiprev))
367 for label, node in branches.iteritems():
367 for label, node in branches.iteritems():
368 f.write("%s %s\n" % (hex(node), label))
368 f.write("%s %s\n" % (hex(node), label))
369 f.rename()
369 f.rename()
370 except (IOError, OSError):
370 except (IOError, OSError):
371 pass
371 pass
372
372
373 def _updatebranchcache(self, partial, start, end):
373 def _updatebranchcache(self, partial, start, end):
374 for r in xrange(start, end):
374 for r in xrange(start, end):
375 c = self.changectx(r)
375 c = self.changectx(r)
376 b = c.branch()
376 b = c.branch()
377 partial[b] = c.node()
377 partial[b] = c.node()
378
378
379 def lookup(self, key):
379 def lookup(self, key):
380 if key == '.':
380 if key == '.':
381 key, second = self.dirstate.parents()
381 key, second = self.dirstate.parents()
382 if key == nullid:
382 if key == nullid:
383 raise repo.RepoError(_("no revision checked out"))
383 raise repo.RepoError(_("no revision checked out"))
384 if second != nullid:
384 if second != nullid:
385 self.ui.warn(_("warning: working directory has two parents, "
385 self.ui.warn(_("warning: working directory has two parents, "
386 "tag '.' uses the first\n"))
386 "tag '.' uses the first\n"))
387 elif key == 'null':
387 elif key == 'null':
388 return nullid
388 return nullid
389 n = self.changelog._match(key)
389 n = self.changelog._match(key)
390 if n:
390 if n:
391 return n
391 return n
392 if key in self.tags():
392 if key in self.tags():
393 return self.tags()[key]
393 return self.tags()[key]
394 if key in self.branchtags():
394 if key in self.branchtags():
395 return self.branchtags()[key]
395 return self.branchtags()[key]
396 n = self.changelog._partialmatch(key)
396 n = self.changelog._partialmatch(key)
397 if n:
397 if n:
398 return n
398 return n
399 try:
399 try:
400 if len(key) == 20:
400 if len(key) == 20:
401 key = hex(key)
401 key = hex(key)
402 except:
402 except:
403 pass
403 pass
404 raise repo.RepoError(_("unknown revision '%s'") % key)
404 raise repo.RepoError(_("unknown revision '%s'") % key)
405
405
406 def dev(self):
406 def dev(self):
407 return os.lstat(self.path).st_dev
407 return os.lstat(self.path).st_dev
408
408
409 def local(self):
409 def local(self):
410 return True
410 return True
411
411
412 def join(self, f):
412 def join(self, f):
413 return os.path.join(self.path, f)
413 return os.path.join(self.path, f)
414
414
415 def sjoin(self, f):
415 def sjoin(self, f):
416 f = self.encodefn(f)
416 f = self.encodefn(f)
417 return os.path.join(self.spath, f)
417 return os.path.join(self.spath, f)
418
418
419 def wjoin(self, f):
419 def wjoin(self, f):
420 return os.path.join(self.root, f)
420 return os.path.join(self.root, f)
421
421
422 def file(self, f):
422 def file(self, f):
423 if f[0] == '/':
423 if f[0] == '/':
424 f = f[1:]
424 f = f[1:]
425 return filelog.filelog(self.sopener, f)
425 return filelog.filelog(self.sopener, f)
426
426
427 def changectx(self, changeid=None):
427 def changectx(self, changeid=None):
428 return context.changectx(self, changeid)
428 return context.changectx(self, changeid)
429
429
430 def workingctx(self):
430 def workingctx(self):
431 return context.workingctx(self)
431 return context.workingctx(self)
432
432
433 def parents(self, changeid=None):
433 def parents(self, changeid=None):
434 '''
434 '''
435 get list of changectxs for parents of changeid or working directory
435 get list of changectxs for parents of changeid or working directory
436 '''
436 '''
437 if changeid is None:
437 if changeid is None:
438 pl = self.dirstate.parents()
438 pl = self.dirstate.parents()
439 else:
439 else:
440 n = self.changelog.lookup(changeid)
440 n = self.changelog.lookup(changeid)
441 pl = self.changelog.parents(n)
441 pl = self.changelog.parents(n)
442 if pl[1] == nullid:
442 if pl[1] == nullid:
443 return [self.changectx(pl[0])]
443 return [self.changectx(pl[0])]
444 return [self.changectx(pl[0]), self.changectx(pl[1])]
444 return [self.changectx(pl[0]), self.changectx(pl[1])]
445
445
446 def filectx(self, path, changeid=None, fileid=None):
446 def filectx(self, path, changeid=None, fileid=None):
447 """changeid can be a changeset revision, node, or tag.
447 """changeid can be a changeset revision, node, or tag.
448 fileid can be a file revision or node."""
448 fileid can be a file revision or node."""
449 return context.filectx(self, path, changeid, fileid)
449 return context.filectx(self, path, changeid, fileid)
450
450
451 def getcwd(self):
451 def getcwd(self):
452 return self.dirstate.getcwd()
452 return self.dirstate.getcwd()
453
453
454 def pathto(self, f, cwd=None):
454 def pathto(self, f, cwd=None):
455 return self.dirstate.pathto(f, cwd)
455 return self.dirstate.pathto(f, cwd)
456
456
457 def wfile(self, f, mode='r'):
457 def wfile(self, f, mode='r'):
458 return self.wopener(f, mode)
458 return self.wopener(f, mode)
459
459
460 def _link(self, f):
460 def _link(self, f):
461 return os.path.islink(self.wjoin(f))
461 return os.path.islink(self.wjoin(f))
462
462
463 def _filter(self, filter, filename, data):
463 def _filter(self, filter, filename, data):
464 if filter not in self.filterpats:
464 if filter not in self.filterpats:
465 l = []
465 l = []
466 for pat, cmd in self.ui.configitems(filter):
466 for pat, cmd in self.ui.configitems(filter):
467 mf = util.matcher(self.root, "", [pat], [], [])[1]
467 mf = util.matcher(self.root, "", [pat], [], [])[1]
468 l.append((mf, cmd))
468 l.append((mf, cmd))
469 self.filterpats[filter] = l
469 self.filterpats[filter] = l
470
470
471 for mf, cmd in self.filterpats[filter]:
471 for mf, cmd in self.filterpats[filter]:
472 if mf(filename):
472 if mf(filename):
473 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
473 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
474 data = util.filter(data, cmd)
474 data = util.filter(data, cmd)
475 break
475 break
476
476
477 return data
477 return data
478
478
479 def wread(self, filename):
479 def wread(self, filename):
480 if self._link(filename):
480 if self._link(filename):
481 data = os.readlink(self.wjoin(filename))
481 data = os.readlink(self.wjoin(filename))
482 else:
482 else:
483 data = self.wopener(filename, 'r').read()
483 data = self.wopener(filename, 'r').read()
484 return self._filter("encode", filename, data)
484 return self._filter("encode", filename, data)
485
485
486 def wwrite(self, filename, data, flags):
486 def wwrite(self, filename, data, flags):
487 data = self._filter("decode", filename, data)
487 data = self._filter("decode", filename, data)
488 if "l" in flags:
488 if "l" in flags:
489 self.wopener.symlink(data, filename)
489 self.wopener.symlink(data, filename)
490 else:
490 else:
491 try:
491 try:
492 if self._link(filename):
492 if self._link(filename):
493 os.unlink(self.wjoin(filename))
493 os.unlink(self.wjoin(filename))
494 except OSError:
494 except OSError:
495 pass
495 pass
496 self.wopener(filename, 'w').write(data)
496 self.wopener(filename, 'w').write(data)
497 util.set_exec(self.wjoin(filename), "x" in flags)
497 util.set_exec(self.wjoin(filename), "x" in flags)
498
498
499 def wwritedata(self, filename, data):
499 def wwritedata(self, filename, data):
500 return self._filter("decode", filename, data)
500 return self._filter("decode", filename, data)
501
501
502 def transaction(self):
502 def transaction(self):
503 if self._transref and self._transref():
503 if self._transref and self._transref():
504 return self._transref().nest()
504 return self._transref().nest()
505
505
506 # save dirstate for rollback
506 # save dirstate for rollback
507 try:
507 try:
508 ds = self.opener("dirstate").read()
508 ds = self.opener("dirstate").read()
509 except IOError:
509 except IOError:
510 ds = ""
510 ds = ""
511 self.opener("journal.dirstate", "w").write(ds)
511 self.opener("journal.dirstate", "w").write(ds)
512
512
513 renames = [(self.sjoin("journal"), self.sjoin("undo")),
513 renames = [(self.sjoin("journal"), self.sjoin("undo")),
514 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
514 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
515 tr = transaction.transaction(self.ui.warn, self.sopener,
515 tr = transaction.transaction(self.ui.warn, self.sopener,
516 self.sjoin("journal"),
516 self.sjoin("journal"),
517 aftertrans(renames))
517 aftertrans(renames))
518 self._transref = weakref.ref(tr)
518 self._transref = weakref.ref(tr)
519 return tr
519 return tr
520
520
521 def recover(self):
521 def recover(self):
522 l = self.lock()
522 l = self.lock()
523 try:
523 try:
524 if os.path.exists(self.sjoin("journal")):
524 if os.path.exists(self.sjoin("journal")):
525 self.ui.status(_("rolling back interrupted transaction\n"))
525 self.ui.status(_("rolling back interrupted transaction\n"))
526 transaction.rollback(self.sopener, self.sjoin("journal"))
526 transaction.rollback(self.sopener, self.sjoin("journal"))
527 self.invalidate()
527 self.invalidate()
528 return True
528 return True
529 else:
529 else:
530 self.ui.warn(_("no interrupted transaction available\n"))
530 self.ui.warn(_("no interrupted transaction available\n"))
531 return False
531 return False
532 finally:
532 finally:
533 del l
533 del l
534
534
535 def rollback(self):
535 def rollback(self):
536 wlock = lock = None
536 wlock = lock = None
537 try:
537 try:
538 wlock = self.wlock()
538 wlock = self.wlock()
539 lock = self.lock()
539 lock = self.lock()
540 if os.path.exists(self.sjoin("undo")):
540 if os.path.exists(self.sjoin("undo")):
541 self.ui.status(_("rolling back last transaction\n"))
541 self.ui.status(_("rolling back last transaction\n"))
542 transaction.rollback(self.sopener, self.sjoin("undo"))
542 transaction.rollback(self.sopener, self.sjoin("undo"))
543 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
543 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
544 self.invalidate()
544 self.invalidate()
545 self.dirstate.invalidate()
545 self.dirstate.invalidate()
546 else:
546 else:
547 self.ui.warn(_("no rollback information available\n"))
547 self.ui.warn(_("no rollback information available\n"))
548 finally:
548 finally:
549 del lock, wlock
549 del lock, wlock
550
550
551 def invalidate(self):
551 def invalidate(self):
552 for a in "changelog manifest".split():
552 for a in "changelog manifest".split():
553 if hasattr(self, a):
553 if hasattr(self, a):
554 self.__delattr__(a)
554 self.__delattr__(a)
555 self.tagscache = None
555 self.tagscache = None
556 self.nodetagscache = None
556 self.nodetagscache = None
557
557
558 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
558 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
559 try:
559 try:
560 l = lock.lock(lockname, 0, releasefn, desc=desc)
560 l = lock.lock(lockname, 0, releasefn, desc=desc)
561 except lock.LockHeld, inst:
561 except lock.LockHeld, inst:
562 if not wait:
562 if not wait:
563 raise
563 raise
564 self.ui.warn(_("waiting for lock on %s held by %r\n") %
564 self.ui.warn(_("waiting for lock on %s held by %r\n") %
565 (desc, inst.locker))
565 (desc, inst.locker))
566 # default to 600 seconds timeout
566 # default to 600 seconds timeout
567 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
567 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
568 releasefn, desc=desc)
568 releasefn, desc=desc)
569 if acquirefn:
569 if acquirefn:
570 acquirefn()
570 acquirefn()
571 return l
571 return l
572
572
573 def lock(self, wait=True):
573 def lock(self, wait=True):
574 if self._lockref and self._lockref():
574 if self._lockref and self._lockref():
575 return self._lockref()
575 return self._lockref()
576
576
577 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
577 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
578 _('repository %s') % self.origroot)
578 _('repository %s') % self.origroot)
579 self._lockref = weakref.ref(l)
579 self._lockref = weakref.ref(l)
580 return l
580 return l
581
581
582 def wlock(self, wait=True):
582 def wlock(self, wait=True):
583 if self._wlockref and self._wlockref():
583 if self._wlockref and self._wlockref():
584 return self._wlockref()
584 return self._wlockref()
585
585
586 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
586 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
587 self.dirstate.invalidate, _('working directory of %s') %
587 self.dirstate.invalidate, _('working directory of %s') %
588 self.origroot)
588 self.origroot)
589 self._wlockref = weakref.ref(l)
589 self._wlockref = weakref.ref(l)
590 return l
590 return l
591
591
592 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
592 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
593 """
593 """
594 commit an individual file as part of a larger transaction
594 commit an individual file as part of a larger transaction
595 """
595 """
596
596
597 t = self.wread(fn)
597 t = self.wread(fn)
598 fl = self.file(fn)
598 fl = self.file(fn)
599 fp1 = manifest1.get(fn, nullid)
599 fp1 = manifest1.get(fn, nullid)
600 fp2 = manifest2.get(fn, nullid)
600 fp2 = manifest2.get(fn, nullid)
601
601
602 meta = {}
602 meta = {}
603 cp = self.dirstate.copied(fn)
603 cp = self.dirstate.copied(fn)
604 if cp:
604 if cp:
605 # Mark the new revision of this file as a copy of another
605 # Mark the new revision of this file as a copy of another
606 # file. This copy data will effectively act as a parent
606 # file. This copy data will effectively act as a parent
607 # of this new revision. If this is a merge, the first
607 # of this new revision. If this is a merge, the first
608 # parent will be the nullid (meaning "look up the copy data")
608 # parent will be the nullid (meaning "look up the copy data")
609 # and the second one will be the other parent. For example:
609 # and the second one will be the other parent. For example:
610 #
610 #
611 # 0 --- 1 --- 3 rev1 changes file foo
611 # 0 --- 1 --- 3 rev1 changes file foo
612 # \ / rev2 renames foo to bar and changes it
612 # \ / rev2 renames foo to bar and changes it
613 # \- 2 -/ rev3 should have bar with all changes and
613 # \- 2 -/ rev3 should have bar with all changes and
614 # should record that bar descends from
614 # should record that bar descends from
615 # bar in rev2 and foo in rev1
615 # bar in rev2 and foo in rev1
616 #
616 #
617 # this allows this merge to succeed:
617 # this allows this merge to succeed:
618 #
618 #
619 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
619 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
620 # \ / merging rev3 and rev4 should use bar@rev2
620 # \ / merging rev3 and rev4 should use bar@rev2
621 # \- 2 --- 4 as the merge base
621 # \- 2 --- 4 as the merge base
622 #
622 #
623 meta["copy"] = cp
623 meta["copy"] = cp
624 if not manifest2: # not a branch merge
624 if not manifest2: # not a branch merge
625 meta["copyrev"] = hex(manifest1.get(cp, nullid))
625 meta["copyrev"] = hex(manifest1.get(cp, nullid))
626 fp2 = nullid
626 fp2 = nullid
627 elif fp2 != nullid: # copied on remote side
627 elif fp2 != nullid: # copied on remote side
628 meta["copyrev"] = hex(manifest1.get(cp, nullid))
628 meta["copyrev"] = hex(manifest1.get(cp, nullid))
629 elif fp1 != nullid: # copied on local side, reversed
629 elif fp1 != nullid: # copied on local side, reversed
630 meta["copyrev"] = hex(manifest2.get(cp))
630 meta["copyrev"] = hex(manifest2.get(cp))
631 fp2 = fp1
631 fp2 = fp1
632 elif cp in manifest2: # directory rename on local side
632 elif cp in manifest2: # directory rename on local side
633 meta["copyrev"] = hex(manifest2[cp])
633 meta["copyrev"] = hex(manifest2[cp])
634 else: # directory rename on remote side
634 else: # directory rename on remote side
635 meta["copyrev"] = hex(manifest1.get(cp, nullid))
635 meta["copyrev"] = hex(manifest1.get(cp, nullid))
636 self.ui.debug(_(" %s: copy %s:%s\n") %
636 self.ui.debug(_(" %s: copy %s:%s\n") %
637 (fn, cp, meta["copyrev"]))
637 (fn, cp, meta["copyrev"]))
638 fp1 = nullid
638 fp1 = nullid
639 elif fp2 != nullid:
639 elif fp2 != nullid:
640 # is one parent an ancestor of the other?
640 # is one parent an ancestor of the other?
641 fpa = fl.ancestor(fp1, fp2)
641 fpa = fl.ancestor(fp1, fp2)
642 if fpa == fp1:
642 if fpa == fp1:
643 fp1, fp2 = fp2, nullid
643 fp1, fp2 = fp2, nullid
644 elif fpa == fp2:
644 elif fpa == fp2:
645 fp2 = nullid
645 fp2 = nullid
646
646
647 # is the file unmodified from the parent? report existing entry
647 # is the file unmodified from the parent? report existing entry
648 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
648 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
649 return fp1
649 return fp1
650
650
651 changelist.append(fn)
651 changelist.append(fn)
652 return fl.add(t, meta, tr, linkrev, fp1, fp2)
652 return fl.add(t, meta, tr, linkrev, fp1, fp2)
653
653
654 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
654 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
655 if p1 is None:
655 if p1 is None:
656 p1, p2 = self.dirstate.parents()
656 p1, p2 = self.dirstate.parents()
657 return self.commit(files=files, text=text, user=user, date=date,
657 return self.commit(files=files, text=text, user=user, date=date,
658 p1=p1, p2=p2, extra=extra, empty_ok=True)
658 p1=p1, p2=p2, extra=extra, empty_ok=True)
659
659
660 def commit(self, files=None, text="", user=None, date=None,
660 def commit(self, files=None, text="", user=None, date=None,
661 match=util.always, force=False, force_editor=False,
661 match=util.always, force=False, force_editor=False,
662 p1=None, p2=None, extra={}, empty_ok=False):
662 p1=None, p2=None, extra={}, empty_ok=False):
663 wlock = lock = tr = None
663 wlock = lock = tr = None
664 try:
664 try:
665 commit = []
665 commit = []
666 remove = []
666 remove = []
667 changed = []
667 changed = []
668 use_dirstate = (p1 is None) # not rawcommit
668 use_dirstate = (p1 is None) # not rawcommit
669 extra = extra.copy()
669 extra = extra.copy()
670
670
671 if use_dirstate:
671 if use_dirstate:
672 if files:
672 if files:
673 for f in files:
673 for f in files:
674 s = self.dirstate[f]
674 s = self.dirstate[f]
675 if s in 'nma':
675 if s in 'nma':
676 commit.append(f)
676 commit.append(f)
677 elif s == 'r':
677 elif s == 'r':
678 remove.append(f)
678 remove.append(f)
679 else:
679 else:
680 self.ui.warn(_("%s not tracked!\n") % f)
680 self.ui.warn(_("%s not tracked!\n") % f)
681 else:
681 else:
682 changes = self.status(match=match)[:5]
682 changes = self.status(match=match)[:5]
683 modified, added, removed, deleted, unknown = changes
683 modified, added, removed, deleted, unknown = changes
684 commit = modified + added
684 commit = modified + added
685 remove = removed
685 remove = removed
686 else:
686 else:
687 commit = files
687 commit = files
688
688
689 if use_dirstate:
689 if use_dirstate:
690 p1, p2 = self.dirstate.parents()
690 p1, p2 = self.dirstate.parents()
691 update_dirstate = True
691 update_dirstate = True
692 else:
692 else:
693 p1, p2 = p1, p2 or nullid
693 p1, p2 = p1, p2 or nullid
694 update_dirstate = (self.dirstate.parents()[0] == p1)
694 update_dirstate = (self.dirstate.parents()[0] == p1)
695
695
696 c1 = self.changelog.read(p1)
696 c1 = self.changelog.read(p1)
697 c2 = self.changelog.read(p2)
697 c2 = self.changelog.read(p2)
698 m1 = self.manifest.read(c1[0]).copy()
698 m1 = self.manifest.read(c1[0]).copy()
699 m2 = self.manifest.read(c2[0])
699 m2 = self.manifest.read(c2[0])
700
700
701 if use_dirstate:
701 if use_dirstate:
702 branchname = self.workingctx().branch()
702 branchname = self.workingctx().branch()
703 try:
703 try:
704 branchname = branchname.decode('UTF-8').encode('UTF-8')
704 branchname = branchname.decode('UTF-8').encode('UTF-8')
705 except UnicodeDecodeError:
705 except UnicodeDecodeError:
706 raise util.Abort(_('branch name not in UTF-8!'))
706 raise util.Abort(_('branch name not in UTF-8!'))
707 else:
707 else:
708 branchname = ""
708 branchname = ""
709
709
710 if use_dirstate:
710 if use_dirstate:
711 oldname = c1[5].get("branch") # stored in UTF-8
711 oldname = c1[5].get("branch") # stored in UTF-8
712 if (not commit and not remove and not force and p2 == nullid
712 if (not commit and not remove and not force and p2 == nullid
713 and branchname == oldname):
713 and branchname == oldname):
714 self.ui.status(_("nothing changed\n"))
714 self.ui.status(_("nothing changed\n"))
715 return None
715 return None
716
716
717 xp1 = hex(p1)
717 xp1 = hex(p1)
718 if p2 == nullid: xp2 = ''
718 if p2 == nullid: xp2 = ''
719 else: xp2 = hex(p2)
719 else: xp2 = hex(p2)
720
720
721 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
721 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
722
722
723 wlock = self.wlock()
723 wlock = self.wlock()
724 lock = self.lock()
724 lock = self.lock()
725 tr = self.transaction()
725 tr = self.transaction()
726 trp = weakref.proxy(tr)
726 trp = weakref.proxy(tr)
727
727
728 # check in files
728 # check in files
729 new = {}
729 new = {}
730 linkrev = self.changelog.count()
730 linkrev = self.changelog.count()
731 commit.sort()
731 commit.sort()
732 is_exec = util.execfunc(self.root, m1.execf)
732 is_exec = util.execfunc(self.root, m1.execf)
733 is_link = util.linkfunc(self.root, m1.linkf)
733 is_link = util.linkfunc(self.root, m1.linkf)
734 for f in commit:
734 for f in commit:
735 self.ui.note(f + "\n")
735 self.ui.note(f + "\n")
736 try:
736 try:
737 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
737 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
738 new_exec = is_exec(f)
738 new_exec = is_exec(f)
739 new_link = is_link(f)
739 new_link = is_link(f)
740 if ((not changed or changed[-1] != f) and
740 if ((not changed or changed[-1] != f) and
741 m2.get(f) != new[f]):
741 m2.get(f) != new[f]):
742 # mention the file in the changelog if some
742 # mention the file in the changelog if some
743 # flag changed, even if there was no content
743 # flag changed, even if there was no content
744 # change.
744 # change.
745 old_exec = m1.execf(f)
745 old_exec = m1.execf(f)
746 old_link = m1.linkf(f)
746 old_link = m1.linkf(f)
747 if old_exec != new_exec or old_link != new_link:
747 if old_exec != new_exec or old_link != new_link:
748 changed.append(f)
748 changed.append(f)
749 m1.set(f, new_exec, new_link)
749 m1.set(f, new_exec, new_link)
750 except (OSError, IOError):
750 except (OSError, IOError):
751 if use_dirstate:
751 if use_dirstate:
752 self.ui.warn(_("trouble committing %s!\n") % f)
752 self.ui.warn(_("trouble committing %s!\n") % f)
753 raise
753 raise
754 else:
754 else:
755 remove.append(f)
755 remove.append(f)
756
756
757 # update manifest
757 # update manifest
758 m1.update(new)
758 m1.update(new)
759 remove.sort()
759 remove.sort()
760 removed = []
760 removed = []
761
761
762 for f in remove:
762 for f in remove:
763 if f in m1:
763 if f in m1:
764 del m1[f]
764 del m1[f]
765 removed.append(f)
765 removed.append(f)
766 elif f in m2:
766 elif f in m2:
767 removed.append(f)
767 removed.append(f)
768 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
768 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
769 (new, removed))
769 (new, removed))
770
770
771 # add changeset
771 # add changeset
772 new = new.keys()
772 new = new.keys()
773 new.sort()
773 new.sort()
774
774
775 user = user or self.ui.username()
775 user = user or self.ui.username()
776 if (not empty_ok and not text) or force_editor:
776 if (not empty_ok and not text) or force_editor:
777 edittext = []
777 edittext = []
778 if text:
778 if text:
779 edittext.append(text)
779 edittext.append(text)
780 edittext.append("")
780 edittext.append("")
781 edittext.append("HG: user: %s" % user)
781 edittext.append("HG: user: %s" % user)
782 if p2 != nullid:
782 if p2 != nullid:
783 edittext.append("HG: branch merge")
783 edittext.append("HG: branch merge")
784 if branchname:
784 if branchname:
785 edittext.append("HG: branch %s" % util.tolocal(branchname))
785 edittext.append("HG: branch %s" % util.tolocal(branchname))
786 edittext.extend(["HG: changed %s" % f for f in changed])
786 edittext.extend(["HG: changed %s" % f for f in changed])
787 edittext.extend(["HG: removed %s" % f for f in removed])
787 edittext.extend(["HG: removed %s" % f for f in removed])
788 if not changed and not remove:
788 if not changed and not remove:
789 edittext.append("HG: no files changed")
789 edittext.append("HG: no files changed")
790 edittext.append("")
790 edittext.append("")
791 # run editor in the repository root
791 # run editor in the repository root
792 olddir = os.getcwd()
792 olddir = os.getcwd()
793 os.chdir(self.root)
793 os.chdir(self.root)
794 text = self.ui.edit("\n".join(edittext), user)
794 text = self.ui.edit("\n".join(edittext), user)
795 os.chdir(olddir)
795 os.chdir(olddir)
796
796
797 if branchname:
797 if branchname:
798 extra["branch"] = branchname
798 extra["branch"] = branchname
799
799
800 if use_dirstate:
800 if use_dirstate:
801 lines = [line.rstrip() for line in text.rstrip().splitlines()]
801 lines = [line.rstrip() for line in text.rstrip().splitlines()]
802 while lines and not lines[0]:
802 while lines and not lines[0]:
803 del lines[0]
803 del lines[0]
804 if not lines:
804 if not lines:
805 return None
805 return None
806 text = '\n'.join(lines)
806 text = '\n'.join(lines)
807
807
808 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
808 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
809 user, date, extra)
809 user, date, extra)
810 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
810 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
811 parent2=xp2)
811 parent2=xp2)
812 tr.close()
812 tr.close()
813
813
814 if self.branchcache and "branch" in extra:
814 if self.branchcache and "branch" in extra:
815 self.branchcache[util.tolocal(extra["branch"])] = n
815 self.branchcache[util.tolocal(extra["branch"])] = n
816
816
817 if use_dirstate or update_dirstate:
817 if use_dirstate or update_dirstate:
818 self.dirstate.setparents(n)
818 self.dirstate.setparents(n)
819 if use_dirstate:
819 if use_dirstate:
820 for f in new:
820 for f in new:
821 self.dirstate.normal(f)
821 self.dirstate.normal(f)
822 for f in removed:
822 for f in removed:
823 self.dirstate.forget(f)
823 self.dirstate.forget(f)
824
824
825 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
825 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
826 return n
826 return n
827 finally:
827 finally:
828 del tr, lock, wlock
828 del tr, lock, wlock
829
829
830 def walk(self, node=None, files=[], match=util.always, badmatch=None):
830 def walk(self, node=None, files=[], match=util.always, badmatch=None):
831 '''
831 '''
832 walk recursively through the directory tree or a given
832 walk recursively through the directory tree or a given
833 changeset, finding all files matched by the match
833 changeset, finding all files matched by the match
834 function
834 function
835
835
836 results are yielded in a tuple (src, filename), where src
836 results are yielded in a tuple (src, filename), where src
837 is one of:
837 is one of:
838 'f' the file was found in the directory tree
838 'f' the file was found in the directory tree
839 'm' the file was only in the dirstate and not in the tree
839 'm' the file was only in the dirstate and not in the tree
840 'b' file was not found and matched badmatch
840 'b' file was not found and matched badmatch
841 '''
841 '''
842
842
843 if node:
843 if node:
844 fdict = dict.fromkeys(files)
844 fdict = dict.fromkeys(files)
845 # for dirstate.walk, files=['.'] means "walk the whole tree".
845 # for dirstate.walk, files=['.'] means "walk the whole tree".
846 # follow that here, too
846 # follow that here, too
847 fdict.pop('.', None)
847 fdict.pop('.', None)
848 mdict = self.manifest.read(self.changelog.read(node)[0])
848 mdict = self.manifest.read(self.changelog.read(node)[0])
849 mfiles = mdict.keys()
849 mfiles = mdict.keys()
850 mfiles.sort()
850 mfiles.sort()
851 for fn in mfiles:
851 for fn in mfiles:
852 for ffn in fdict:
852 for ffn in fdict:
853 # match if the file is the exact name or a directory
853 # match if the file is the exact name or a directory
854 if ffn == fn or fn.startswith("%s/" % ffn):
854 if ffn == fn or fn.startswith("%s/" % ffn):
855 del fdict[ffn]
855 del fdict[ffn]
856 break
856 break
857 if match(fn):
857 if match(fn):
858 yield 'm', fn
858 yield 'm', fn
859 ffiles = fdict.keys()
859 ffiles = fdict.keys()
860 ffiles.sort()
860 ffiles.sort()
861 for fn in ffiles:
861 for fn in ffiles:
862 if badmatch and badmatch(fn):
862 if badmatch and badmatch(fn):
863 if match(fn):
863 if match(fn):
864 yield 'b', fn
864 yield 'b', fn
865 else:
865 else:
866 self.ui.warn(_('%s: No such file in rev %s\n')
866 self.ui.warn(_('%s: No such file in rev %s\n')
867 % (self.pathto(fn), short(node)))
867 % (self.pathto(fn), short(node)))
868 else:
868 else:
869 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
869 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
870 yield src, fn
870 yield src, fn
871
871
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

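# Illustrative sketch (standalone; plain dicts stand in for manifests and
# flags are ignored): the mf1/mf2 loop above classifies files by walking the
# keys of the newer manifest against the older one, deleting each matched key
# from the older copy; whatever is left over afterwards must have been removed.
def classify(mf1, mf2):
    mf1 = mf1.copy()
    modified, added, clean = [], [], []
    keys = mf2.keys()
    keys.sort()
    for fn in keys:
        if fn in mf1:
            if mf1[fn] != mf2[fn]:
                modified.append(fn)
            else:
                clean.append(fn)
            del mf1[fn]
        else:
            added.append(fn)
    removed = mf1.keys()
    removed.sort()
    return modified, added, removed, clean

print classify({'a': 1, 'b': 2, 'c': 3}, {'a': 1, 'b': 5, 'd': 7})
# -> (['b'], ['d'], ['c'], ['a'])
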
    def add(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn("%s not removed!\n" % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

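# Illustrative sketch (standalone, with a toy DAG of integer revisions instead
# of a real changelog): walking from the branch tip downwards in reverse
# topological order while maintaining an "ancestors" set, as described in the
# comment block above, finds every head of the branch in a single pass.
# parents[rev] lists the parent revs; branch[rev] names the branch of rev.
parents = {0: [], 1: [0], 2: [1], 3: [1], 4: [3]}
branch = {0: 'default', 1: 'default', 2: 'stable', 3: 'default', 4: 'stable'}

def toy_branchheads(name, tip):
    heads = [tip]
    ancestors = set(parents[tip])
    for rev in range(tip - 1, -1, -1):
        if rev in ancestors:
            # ancestor of a known head: never a head itself
            ancestors.update(parents[rev])
            ancestors.remove(rev)
        elif branch[rev] == name:
            # in the branch but not under any head found so far
            heads.append(rev)
            ancestors.update(parents[rev])
    return heads

print toy_branchheads('stable', 4)   # -> [4, 2]: rev 2 is a second head
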
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

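# Illustrative sketch (standalone): between() samples a linear stretch of
# history at exponentially growing distances 1, 2, 4, 8, ... below the top.
# With integers standing in for nodes and parent(n) = n - 1:
def sample(top, bottom):
    n, l, i, f = top, [], 0, 1
    while n != bottom:
        if i == f:          # i counts steps taken, f doubles on each hit
            l.append(n)
            f *= 2
        n, i = n - 1, i + 1
    return l

print sample(100, 60)   # -> [99, 98, 96, 92, 84, 68]
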
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no child that exists in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

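# Illustrative sketch (standalone; integers for nodes, parent(n) = n - 1):
# given the exponentially spaced samples a remote.between() call would return,
# the narrowing loop above homes in on the boundary between what we already
# have and what we are missing, re-requesting ever smaller subranges until the
# gap between a missing node and a known node is at most one step (f <= 2).
def sample(top, bottom):
    # same 1, 2, 4, 8, ... spacing as the between() sketch above
    n, l, i, f = top, [], 0, 1
    while n != bottom:
        if i == f:
            l.append(n)
            f *= 2
        n, i = n - 1, i + 1
    return l

def narrow(top, bottom, known):
    search = [(top, bottom)]
    while search:
        head, base = search.pop(0)
        l = sample(head, base) + [base]   # what the remote would send back
        p, f = head, 1
        for i in l:
            if known(i):
                if f <= 2:
                    return p              # earliest missing node found
                search.append((p, i))     # keep narrowing this subrange
                break
            p, f = i, f * 2

# everything up to rev 70 is known locally; 71 is the first missing one
print narrow(100, 60, lambda n: n <= 70)   # -> 71
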
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

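# Illustrative sketch (standalone, toy graph): the pruning above removes every
# node the remote has (the common nodes and all their ancestors); the outgoing
# roots are then exactly the surviving nodes none of whose parents survived.
# parents maps node -> parent list; base is the common frontier.
parents = {'a': [], 'b': ['a'], 'c': ['b'], 'd': ['b'], 'e': ['c', 'd']}

def toy_findoutgoing(base):
    remain = set(parents.keys())
    remove = list(base)
    while remove:                      # prune base and all its ancestors
        n = remove.pop(0)
        if n in remain:
            remain.remove(n)
            remove.extend(parents[n])
    # roots of the missing subsets: no parent is itself missing
    return [n for n in sorted(remain)
            if not [p for p in parents[n] if p in remain]]

print toy_findoutgoing(['b'])   # -> ['c', 'd']; 'e' is missing but not a root
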
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

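# Illustrative sketch (standalone): the new-remote-heads check above boils
# down to head counting. Start from the local outgoing heads, add back every
# remote head that no outgoing head descends from, and compare the total
# against what the remote has now. descends(r, h) is a hypothetical oracle for
# "h descends from r"; the real code also keeps remote heads it doesn't know
# locally.
def creates_new_heads(local_heads, remote_heads, descends):
    newheads = list(local_heads)
    for r in remote_heads:
        if not [h for h in local_heads if descends(r, h)]:
            newheads.append(r)        # this remote head stays a head
    return len(newheads) > len(remote_heads)

# one local head descending from the single remote head: no new heads
print creates_new_heads(['h1'], ['r1'], lambda r, h: True)    # -> False
# no local head descends from it: push would add a second head
print creates_new_heads(['h1'], ['r1'], lambda r, h: False)   # -> True
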
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

1475 def changegroupsubset(self, bases, heads, source):
1475 def changegroupsubset(self, bases, heads, source):
1476 """This function generates a changegroup consisting of all the nodes
1476 """This function generates a changegroup consisting of all the nodes
1477 that are descendents of any of the bases, and ancestors of any of
1477 that are descendents of any of the bases, and ancestors of any of
1478 the heads.
1478 the heads.
1479
1479
1480 It is fairly complex as determining which filenodes and which
1480 It is fairly complex as determining which filenodes and which
1481 manifest nodes need to be included for the changeset to be complete
1481 manifest nodes need to be included for the changeset to be complete
1482 is non-trivial.
1482 is non-trivial.
1483
1483
1484 Another wrinkle is doing the reverse, figuring out which changeset in
1484 Another wrinkle is doing the reverse, figuring out which changeset in
1485 the changegroup a particular filenode or manifestnode belongs to."""
1485 the changegroup a particular filenode or manifestnode belongs to."""
1486
1486
1487 self.hook('preoutgoing', throw=True, source=source)
1487 self.hook('preoutgoing', throw=True, source=source)
1488
1488
1489 # Set up some initial variables
1489 # Set up some initial variables
1490 # Make it easy to refer to self.changelog
1490 # Make it easy to refer to self.changelog
1491 cl = self.changelog
1491 cl = self.changelog
1492 # msng is short for missing - compute the list of changesets in this
1492 # msng is short for missing - compute the list of changesets in this
1493 # changegroup.
1493 # changegroup.
1494 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1494 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1495 self.changegroupinfo(msng_cl_lst)
1495 self.changegroupinfo(msng_cl_lst)
1496 # Some bases may turn out to be superfluous, and some heads may be
1496 # Some bases may turn out to be superfluous, and some heads may be
1497 # too. nodesbetween will return the minimal set of bases and heads
1497 # too. nodesbetween will return the minimal set of bases and heads
1498 # necessary to re-create the changegroup.
1498 # necessary to re-create the changegroup.
1499
1499
1500 # Known heads are the list of heads that it is assumed the recipient
1500 # Known heads are the list of heads that it is assumed the recipient
1501 # of this changegroup will know about.
1501 # of this changegroup will know about.
1502 knownheads = {}
1502 knownheads = {}
1503 # We assume that all parents of bases are known heads.
1503 # We assume that all parents of bases are known heads.
1504 for n in bases:
1504 for n in bases:
1505 for p in cl.parents(n):
1505 for p in cl.parents(n):
1506 if p != nullid:
1506 if p != nullid:
1507 knownheads[p] = 1
1507 knownheads[p] = 1
1508 knownheads = knownheads.keys()
1508 knownheads = knownheads.keys()
1509 if knownheads:
1509 if knownheads:
1510 # Now that we know what heads are known, we can compute which
1510 # Now that we know what heads are known, we can compute which
1511 # changesets are known. The recipient must know about all
1511 # changesets are known. The recipient must know about all
1512 # changesets required to reach the known heads from the null
1512 # changesets required to reach the known heads from the null
1513 # changeset.
1513 # changeset.
1514 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1514 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1515 junk = None
1515 junk = None
1516 # Transform the list into an ersatz set.
1516 # Transform the list into an ersatz set.
1517 has_cl_set = dict.fromkeys(has_cl_set)
1517 has_cl_set = dict.fromkeys(has_cl_set)
1518 else:
1518 else:
1519 # If there were no known heads, the recipient cannot be assumed to
1519 # If there were no known heads, the recipient cannot be assumed to
1520 # know about any changesets.
1520 # know about any changesets.
1521 has_cl_set = {}
1521 has_cl_set = {}
1522
1522
1523 # Make it easy to refer to self.manifest
1523 # Make it easy to refer to self.manifest
1524 mnfst = self.manifest
1524 mnfst = self.manifest
1525 # We don't know which manifests are missing yet
1525 # We don't know which manifests are missing yet
1526 msng_mnfst_set = {}
1526 msng_mnfst_set = {}
1527 # Nor do we know which filenodes are missing.
1527 # Nor do we know which filenodes are missing.
1528 msng_filenode_set = {}
1528 msng_filenode_set = {}
1529
1529
1530 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1530 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1531 junk = None
1531 junk = None
1532
1532
1533 # A changeset always belongs to itself, so the changenode lookup
1533 # A changeset always belongs to itself, so the changenode lookup
1534 # function for a changenode is identity.
1534 # function for a changenode is identity.
1535 def identity(x):
1535 def identity(x):
1536 return x
1536 return x
1537
1537
1538 # A function generating function. Sets up an environment for the
1538 # A function generating function. Sets up an environment for the
1539 # inner function.
1539 # inner function.
1540 def cmp_by_rev_func(revlog):
1540 def cmp_by_rev_func(revlog):
1541 # Compare two nodes by their revision number in the environment's
1541 # Compare two nodes by their revision number in the environment's
1542 # revision history. Since the revision number both represents the
1542 # revision history. Since the revision number both represents the
1543 # most efficient order to read the nodes in, and represents a
1543 # most efficient order to read the nodes in, and represents a
1544 # topological sorting of the nodes, this function is often useful.
1544 # topological sorting of the nodes, this function is often useful.
1545 def cmp_by_rev(a, b):
1545 def cmp_by_rev(a, b):
1546 return cmp(revlog.rev(a), revlog.rev(b))
1546 return cmp(revlog.rev(a), revlog.rev(b))
1547 return cmp_by_rev
1547 return cmp_by_rev
1548
1548
1549 # If we determine that a particular file or manifest node must be a
1549 # If we determine that a particular file or manifest node must be a
1550 # node that the recipient of the changegroup will already have, we can
1550 # node that the recipient of the changegroup will already have, we can
1551 # also assume the recipient will have all the parents. This function
1551 # also assume the recipient will have all the parents. This function
1552 # prunes them from the set of missing nodes.
1552 # prunes them from the set of missing nodes.
1553 def prune_parents(revlog, hasset, msngset):
1553 def prune_parents(revlog, hasset, msngset):
1554 haslst = hasset.keys()
1554 haslst = hasset.keys()
1555 haslst.sort(cmp_by_rev_func(revlog))
1555 haslst.sort(cmp_by_rev_func(revlog))
1556 for node in haslst:
1556 for node in haslst:
1557 parentlst = [p for p in revlog.parents(node) if p != nullid]
1557 parentlst = [p for p in revlog.parents(node) if p != nullid]
1558 while parentlst:
1558 while parentlst:
1559 n = parentlst.pop()
1559 n = parentlst.pop()
1560 if n not in hasset:
1560 if n not in hasset:
1561 hasset[n] = 1
1561 hasset[n] = 1
1562 p = [p for p in revlog.parents(n) if p != nullid]
1562 p = [p for p in revlog.parents(n) if p != nullid]
1563 parentlst.extend(p)
1563 parentlst.extend(p)
1564 for n in hasset:
1564 for n in hasset:
1565 msngset.pop(n, None)
1565 msngset.pop(n, None)
1566
1566
1567 # This is a function generating function used to set up an environment
1567 # This is a function generating function used to set up an environment
1568 # for the inner function to execute in.
1568 # for the inner function to execute in.
1569 def manifest_and_file_collector(changedfileset):
1569 def manifest_and_file_collector(changedfileset):
1570 # This is an information gathering function that gathers
1570 # This is an information gathering function that gathers
1571 # information from each changeset node that goes out as part of
1571 # information from each changeset node that goes out as part of
1572 # the changegroup. The information gathered is a list of which
1572 # the changegroup. The information gathered is a list of which
1573 # manifest nodes are potentially required (the recipient may
1573 # manifest nodes are potentially required (the recipient may
1574 # already have them) and total list of all files which were
1574 # already have them) and total list of all files which were
1575 # changed in any changeset in the changegroup.
1575 # changed in any changeset in the changegroup.
1576 #
1576 #
1577 # We also remember the first changenode we saw any manifest
1577 # We also remember the first changenode we saw any manifest
1578 # referenced by so we can later determine which changenode 'owns'
1578 # referenced by so we can later determine which changenode 'owns'
1579 # the manifest.
1579 # the manifest.
1580 def collect_manifests_and_files(clnode):
1580 def collect_manifests_and_files(clnode):
1581 c = cl.read(clnode)
1581 c = cl.read(clnode)
1582 for f in c[3]:
1582 for f in c[3]:
1583 # This is to make sure we only have one instance of each
1583 # This is to make sure we only have one instance of each
1584 # filename string for each filename.
1584 # filename string for each filename.
1585 changedfileset.setdefault(f, f)
1585 changedfileset.setdefault(f, f)
1586 msng_mnfst_set.setdefault(c[0], clnode)
1586 msng_mnfst_set.setdefault(c[0], clnode)
1587 return collect_manifests_and_files
1587 return collect_manifests_and_files
1588
1588
1589 # Figure out which manifest nodes (of the ones we think might be part
1589 # Figure out which manifest nodes (of the ones we think might be part
1590 # of the changegroup) the recipient must know about and remove them
1590 # of the changegroup) the recipient must know about and remove them
1591 # from the changegroup.
1591 # from the changegroup.
1592 def prune_manifests():
1592 def prune_manifests():
1593 has_mnfst_set = {}
1593 has_mnfst_set = {}
1594 for n in msng_mnfst_set:
1594 for n in msng_mnfst_set:
1595 # If a 'missing' manifest thinks it belongs to a changenode
1595 # If a 'missing' manifest thinks it belongs to a changenode
1596 # the recipient is assumed to have, obviously the recipient
1596 # the recipient is assumed to have, obviously the recipient
1597 # must have that manifest.
1597 # must have that manifest.
1598 linknode = cl.node(mnfst.linkrev(n))
1598 linknode = cl.node(mnfst.linkrev(n))
1599 if linknode in has_cl_set:
1599 if linknode in has_cl_set:
1600 has_mnfst_set[n] = 1
1600 has_mnfst_set[n] = 1
1601 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1601 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1602
1602
1603 # Use the information collected in collect_manifests_and_files to say
1603 # Use the information collected in collect_manifests_and_files to say
1604 # which changenode any manifestnode belongs to.
1604 # which changenode any manifestnode belongs to.
1605 def lookup_manifest_link(mnfstnode):
1605 def lookup_manifest_link(mnfstnode):
1606 return msng_mnfst_set[mnfstnode]
1606 return msng_mnfst_set[mnfstnode]
1607
1607
1608 # A function generating function that sets up the initial environment
1608 # A function generating function that sets up the initial environment
1609 # the inner function.
1609 # the inner function.
1610 def filenode_collector(changedfiles):
1610 def filenode_collector(changedfiles):
1611 next_rev = [0]
1611 next_rev = [0]
1612 # This gathers information from each manifestnode included in the
1612 # This gathers information from each manifestnode included in the
1613 # changegroup about which filenodes the manifest node references
1613 # changegroup about which filenodes the manifest node references
1614 # so we can include those in the changegroup too.
1614 # so we can include those in the changegroup too.
1615 #
1615 #
1616 # It also remembers which changenode each filenode belongs to. It
1616 # It also remembers which changenode each filenode belongs to. It
1617 # does this by assuming the a filenode belongs to the changenode
1617 # does this by assuming the a filenode belongs to the changenode
1618 # the first manifest that references it belongs to.
1618 # the first manifest that references it belongs to.
1619 def collect_msng_filenodes(mnfstnode):
1619 def collect_msng_filenodes(mnfstnode):
1620 r = mnfst.rev(mnfstnode)
1620 r = mnfst.rev(mnfstnode)
1621 if r == next_rev[0]:
1621 if r == next_rev[0]:
1622 # If the last rev we looked at was the one just previous,
1622 # If the last rev we looked at was the one just previous,
1623 # we only need to see a diff.
1623 # we only need to see a diff.
1624 deltamf = mnfst.readdelta(mnfstnode)
1624 deltamf = mnfst.readdelta(mnfstnode)
1625 # For each line in the delta
1625 # For each line in the delta
1626 for f, fnode in deltamf.items():
1626 for f, fnode in deltamf.items():
1627 f = changedfiles.get(f, None)
1627 f = changedfiles.get(f, None)
1628 # And if the file is in the list of files we care
1628 # And if the file is in the list of files we care
1629 # about.
1629 # about.
1630 if f is not None:
1630 if f is not None:
1631 # Get the changenode this manifest belongs to
1631 # Get the changenode this manifest belongs to
1632 clnode = msng_mnfst_set[mnfstnode]
1632 clnode = msng_mnfst_set[mnfstnode]
1633 # Create the set of filenodes for the file if
1633 # Create the set of filenodes for the file if
1634 # there isn't one already.
1634 # there isn't one already.
1635 ndset = msng_filenode_set.setdefault(f, {})
1635 ndset = msng_filenode_set.setdefault(f, {})
1636 # And set the filenode's changelog node to the
1636 # And set the filenode's changelog node to the
1637 # manifest's if it hasn't been set already.
1637 # manifest's if it hasn't been set already.
1638 ndset.setdefault(fnode, clnode)
1638 ndset.setdefault(fnode, clnode)
1639 else:
1639 else:
1640 # Otherwise we need a full manifest.
1640 # Otherwise we need a full manifest.
1641 m = mnfst.read(mnfstnode)
1641 m = mnfst.read(mnfstnode)
1642 # For every file in we care about.
1642 # For every file in we care about.
1643 for f in changedfiles:
1643 for f in changedfiles:
1644 fnode = m.get(f, None)
1644 fnode = m.get(f, None)
1645 # If it's in the manifest
1645 # If it's in the manifest
1646 if fnode is not None:
1646 if fnode is not None:
1647 # See comments above.
1647 # See comments above.
1648 clnode = msng_mnfst_set[mnfstnode]
1648 clnode = msng_mnfst_set[mnfstnode]
1649 ndset = msng_filenode_set.setdefault(f, {})
1649 ndset = msng_filenode_set.setdefault(f, {})
1650 ndset.setdefault(fnode, clnode)
1650 ndset.setdefault(fnode, clnode)
1651 # Remember the revision we hope to see next.
1651 # Remember the revision we hope to see next.
1652 next_rev[0] = r + 1
1652 next_rev[0] = r + 1
1653 return collect_msng_filenodes
1653 return collect_msng_filenodes
1654
1654
1655 # We have a list of filenodes we think we need for a file, lets remove
1655 # We have a list of filenodes we think we need for a file, lets remove
1656 # all those we now the recipient must have.
1656 # all those we now the recipient must have.
1657 def prune_filenodes(f, filerevlog):
1657 def prune_filenodes(f, filerevlog):
1658 msngset = msng_filenode_set[f]
1658 msngset = msng_filenode_set[f]
1659 hasset = {}
1659 hasset = {}
1660 # If a 'missing' filenode thinks it belongs to a changenode we
1660 # If a 'missing' filenode thinks it belongs to a changenode we
1661 # assume the recipient must have, then the recipient must have
1661 # assume the recipient must have, then the recipient must have
1662 # that filenode.
1662 # that filenode.
1663 for n in msngset:
1663 for n in msngset:
1664 clnode = cl.node(filerevlog.linkrev(n))
1664 clnode = cl.node(filerevlog.linkrev(n))
1665 if clnode in has_cl_set:
1665 if clnode in has_cl_set:
1666 hasset[n] = 1
1666 hasset[n] = 1
1667 prune_parents(filerevlog, hasset, msngset)
1667 prune_parents(filerevlog, hasset, msngset)
1668
1668
1669 # A function generator function that sets up the a context for the
1669 # A function generator function that sets up the a context for the
1670 # inner function.
1670 # inner function.
1671 def lookup_filenode_link_func(fname):
1671 def lookup_filenode_link_func(fname):
1672 msngset = msng_filenode_set[fname]
1672 msngset = msng_filenode_set[fname]
1673 # Lookup the changenode the filenode belongs to.
1673 # Lookup the changenode the filenode belongs to.
1674 def lookup_filenode_link(fnode):
1674 def lookup_filenode_link(fnode):
1675 return msngset[fnode]
1675 return msngset[fnode]
1676 return lookup_filenode_link
1676 return lookup_filenode_link
1677
1677
1678 # Now that we have all theses utility functions to help out and
1678 # Now that we have all theses utility functions to help out and
1679 # logically divide up the task, generate the group.
1679 # logically divide up the task, generate the group.
1680 def gengroup():
1680 def gengroup():
1681 # The set of changed files starts empty.
1681 # The set of changed files starts empty.
1682 changedfiles = {}
1682 changedfiles = {}
1683 # Create a changenode group generator that will call our functions
1683 # Create a changenode group generator that will call our functions
1684 # back to lookup the owning changenode and collect information.
1684 # back to lookup the owning changenode and collect information.
1685 group = cl.group(msng_cl_lst, identity,
1685 group = cl.group(msng_cl_lst, identity,
1686 manifest_and_file_collector(changedfiles))
1686 manifest_and_file_collector(changedfiles))
1687 for chnk in group:
1687 for chnk in group:
1688 yield chnk
1688 yield chnk
1689
1689
1690 # The list of manifests has been collected by the generator
1690 # The list of manifests has been collected by the generator
1691 # calling our functions back.
1691 # calling our functions back.
1692 prune_manifests()
1692 prune_manifests()
1693 msng_mnfst_lst = msng_mnfst_set.keys()
1693 msng_mnfst_lst = msng_mnfst_set.keys()
1694 # Sort the manifestnodes by revision number.
1694 # Sort the manifestnodes by revision number.
1695 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1695 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1696 # Create a generator for the manifestnodes that calls our lookup
1696 # Create a generator for the manifestnodes that calls our lookup
1697 # and data collection functions back.
1697 # and data collection functions back.
1698 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1698 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1699 filenode_collector(changedfiles))
1699 filenode_collector(changedfiles))
1700 for chnk in group:
1700 for chnk in group:
1701 yield chnk
1701 yield chnk
1702
1702
1703 # These are no longer needed, dereference and toss the memory for
1703 # These are no longer needed, dereference and toss the memory for
1704 # them.
1704 # them.
1705 msng_mnfst_lst = None
1705 msng_mnfst_lst = None
1706 msng_mnfst_set.clear()
1706 msng_mnfst_set.clear()
1707
1707
1708 changedfiles = changedfiles.keys()
1708 changedfiles = changedfiles.keys()
1709 changedfiles.sort()
1709 changedfiles.sort()
1710 # Go through all our files in order sorted by name.
1710 # Go through all our files in order sorted by name.
1711 for fname in changedfiles:
1711 for fname in changedfiles:
1712 filerevlog = self.file(fname)
1712 filerevlog = self.file(fname)
1713 # Toss out the filenodes that the recipient isn't really
1713 # Toss out the filenodes that the recipient isn't really
1714 # missing.
1714 # missing.
1715 if msng_filenode_set.has_key(fname):
1715 if msng_filenode_set.has_key(fname):
1716 prune_filenodes(fname, filerevlog)
1716 prune_filenodes(fname, filerevlog)
1717 msng_filenode_lst = msng_filenode_set[fname].keys()
1717 msng_filenode_lst = msng_filenode_set[fname].keys()
1718 else:
1718 else:
1719 msng_filenode_lst = []
1719 msng_filenode_lst = []
1720 # If any filenodes are left, generate the group for them,
1720 # If any filenodes are left, generate the group for them,
1721 # otherwise don't bother.
1721 # otherwise don't bother.
1722 if len(msng_filenode_lst) > 0:
1722 if len(msng_filenode_lst) > 0:
1723 yield changegroup.genchunk(fname)
1723 yield changegroup.chunkheader(len(fname))
1724 yield fname
1724 # Sort the filenodes by their revision #
1725 # Sort the filenodes by their revision #
1725 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1726 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1726 # Create a group generator and only pass in a changenode
1727 # Create a group generator and only pass in a changenode
1727 # lookup function as we need to collect no information
1728 # lookup function as we need to collect no information
1728 # from filenodes.
1729 # from filenodes.
1729 group = filerevlog.group(msng_filenode_lst,
1730 group = filerevlog.group(msng_filenode_lst,
1730 lookup_filenode_link_func(fname))
1731 lookup_filenode_link_func(fname))
1731 for chnk in group:
1732 for chnk in group:
1732 yield chnk
1733 yield chnk
1733 if msng_filenode_set.has_key(fname):
1734 if msng_filenode_set.has_key(fname):
1734 # Don't need this anymore, toss it to free memory.
1735 # Don't need this anymore, toss it to free memory.
1735 del msng_filenode_set[fname]
1736 del msng_filenode_set[fname]
1736 # Signal that no more groups are left.
1737 # Signal that no more groups are left.
1737 yield changegroup.closechunk()
1738 yield changegroup.closechunk()
1738
1739
1739 if msng_cl_lst:
1740 if msng_cl_lst:
1740 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1741 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1741
1742
1742 return util.chunkbuffer(gengroup())
1743 return util.chunkbuffer(gengroup())
1743
1744
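The generator above (and the simpler changegroup below) emit one wire framing: every chunk is a 4-byte big-endian length, counting the length field itself, followed by the payload, and a zero-length chunk closes a group. The patched lines yield the chunkheader and the filename as two separate strings instead of concatenating them, which is what avoids the large copy. A minimal sketch of that framing, illustrative only and not part of the patch:

    import struct

    def frame(parts):
        for data in parts:
            yield struct.pack(">l", len(data) + 4)  # chunkheader(len(data))
            yield data                              # payload, never copied
        yield struct.pack(">l", 0)                  # closechunk() terminator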
1744 def changegroup(self, basenodes, source):
1745 def changegroup(self, basenodes, source):
1745 """Generate a changegroup of all nodes that we have that a recipient
1746 """Generate a changegroup of all nodes that we have that a recipient
1746 doesn't.
1747 doesn't.
1747
1748
1748 This is much easier than the previous function as we can assume that
1749 This is much easier than the previous function as we can assume that
1749 the recipient has any changenode we aren't sending them."""
1750 the recipient has any changenode we aren't sending them."""
1750
1751
1751 self.hook('preoutgoing', throw=True, source=source)
1752 self.hook('preoutgoing', throw=True, source=source)
1752
1753
1753 cl = self.changelog
1754 cl = self.changelog
1754 nodes = cl.nodesbetween(basenodes, None)[0]
1755 nodes = cl.nodesbetween(basenodes, None)[0]
1755 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1756 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1756 self.changegroupinfo(nodes)
1757 self.changegroupinfo(nodes)
1757
1758
1758 def identity(x):
1759 def identity(x):
1759 return x
1760 return x
1760
1761
1761 def gennodelst(revlog):
1762 def gennodelst(revlog):
1762 for r in xrange(0, revlog.count()):
1763 for r in xrange(0, revlog.count()):
1763 n = revlog.node(r)
1764 n = revlog.node(r)
1764 if revlog.linkrev(n) in revset:
1765 if revlog.linkrev(n) in revset:
1765 yield n
1766 yield n
1766
1767
1767 def changed_file_collector(changedfileset):
1768 def changed_file_collector(changedfileset):
1768 def collect_changed_files(clnode):
1769 def collect_changed_files(clnode):
1769 c = cl.read(clnode)
1770 c = cl.read(clnode)
1770 for fname in c[3]:
1771 for fname in c[3]:
1771 changedfileset[fname] = 1
1772 changedfileset[fname] = 1
1772 return collect_changed_files
1773 return collect_changed_files
1773
1774
1774 def lookuprevlink_func(revlog):
1775 def lookuprevlink_func(revlog):
1775 def lookuprevlink(n):
1776 def lookuprevlink(n):
1776 return cl.node(revlog.linkrev(n))
1777 return cl.node(revlog.linkrev(n))
1777 return lookuprevlink
1778 return lookuprevlink
1778
1779
1779 def gengroup():
1780 def gengroup():
1780 # construct a list of all changed files
1781 # construct a list of all changed files
1781 changedfiles = {}
1782 changedfiles = {}
1782
1783
1783 for chnk in cl.group(nodes, identity,
1784 for chnk in cl.group(nodes, identity,
1784 changed_file_collector(changedfiles)):
1785 changed_file_collector(changedfiles)):
1785 yield chnk
1786 yield chnk
1786 changedfiles = changedfiles.keys()
1787 changedfiles = changedfiles.keys()
1787 changedfiles.sort()
1788 changedfiles.sort()
1788
1789
1789 mnfst = self.manifest
1790 mnfst = self.manifest
1790 nodeiter = gennodelst(mnfst)
1791 nodeiter = gennodelst(mnfst)
1791 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1792 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1792 yield chnk
1793 yield chnk
1793
1794
1794 for fname in changedfiles:
1795 for fname in changedfiles:
1795 filerevlog = self.file(fname)
1796 filerevlog = self.file(fname)
1796 nodeiter = gennodelst(filerevlog)
1797 nodeiter = gennodelst(filerevlog)
1797 nodeiter = list(nodeiter)
1798 nodeiter = list(nodeiter)
1798 if nodeiter:
1799 if nodeiter:
1799 yield changegroup.genchunk(fname)
1800 yield changegroup.chunkheader(len(fname))
1801 yield fname
1800 lookup = lookuprevlink_func(filerevlog)
1802 lookup = lookuprevlink_func(filerevlog)
1801 for chnk in filerevlog.group(nodeiter, lookup):
1803 for chnk in filerevlog.group(nodeiter, lookup):
1802 yield chnk
1804 yield chnk
1803
1805
1804 yield changegroup.closechunk()
1806 yield changegroup.closechunk()
1805
1807
1806 if nodes:
1808 if nodes:
1807 self.hook('outgoing', node=hex(nodes[0]), source=source)
1809 self.hook('outgoing', node=hex(nodes[0]), source=source)
1808
1810
1809 return util.chunkbuffer(gengroup())
1811 return util.chunkbuffer(gengroup())
1810
1812
1811 def addchangegroup(self, source, srctype, url):
1813 def addchangegroup(self, source, srctype, url):
1812 """add changegroup to repo.
1814 """add changegroup to repo.
1813
1815
1814 return values:
1816 return values:
1815 - nothing changed or no source: 0
1817 - nothing changed or no source: 0
1816 - more heads than before: 1+added heads (2..n)
1818 - more heads than before: 1+added heads (2..n)
1817 - fewer heads than before: -1-removed heads (-2..-n)
1819 - fewer heads than before: -1-removed heads (-2..-n)
1818 - number of heads stays the same: 1
1820 - number of heads stays the same: 1
1819 """
1821 """
1820 def csmap(x):
1822 def csmap(x):
1821 self.ui.debug(_("add changeset %s\n") % short(x))
1823 self.ui.debug(_("add changeset %s\n") % short(x))
1822 return cl.count()
1824 return cl.count()
1823
1825
1824 def revmap(x):
1826 def revmap(x):
1825 return cl.rev(x)
1827 return cl.rev(x)
1826
1828
1827 if not source:
1829 if not source:
1828 return 0
1830 return 0
1829
1831
1830 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1832 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1831
1833
1832 changesets = files = revisions = 0
1834 changesets = files = revisions = 0
1833
1835
1834 # write changelog data to temp files so concurrent readers will not see
1836 # write changelog data to temp files so concurrent readers will not see
1835 # inconsistent view
1837 # inconsistent view
1836 cl = self.changelog
1838 cl = self.changelog
1837 cl.delayupdate()
1839 cl.delayupdate()
1838 oldheads = len(cl.heads())
1840 oldheads = len(cl.heads())
1839
1841
1840 tr = self.transaction()
1842 tr = self.transaction()
1841 try:
1843 try:
1842 trp = weakref.proxy(tr)
1844 trp = weakref.proxy(tr)
1843 # pull off the changeset group
1845 # pull off the changeset group
1844 self.ui.status(_("adding changesets\n"))
1846 self.ui.status(_("adding changesets\n"))
1845 cor = cl.count() - 1
1847 cor = cl.count() - 1
1846 chunkiter = changegroup.chunkiter(source)
1848 chunkiter = changegroup.chunkiter(source)
1847 if cl.addgroup(chunkiter, csmap, trp, 1) is None:
1849 if cl.addgroup(chunkiter, csmap, trp, 1) is None:
1848 raise util.Abort(_("received changelog group is empty"))
1850 raise util.Abort(_("received changelog group is empty"))
1849 cnr = cl.count() - 1
1851 cnr = cl.count() - 1
1850 changesets = cnr - cor
1852 changesets = cnr - cor
1851
1853
1852 # pull off the manifest group
1854 # pull off the manifest group
1853 self.ui.status(_("adding manifests\n"))
1855 self.ui.status(_("adding manifests\n"))
1854 chunkiter = changegroup.chunkiter(source)
1856 chunkiter = changegroup.chunkiter(source)
1855 # no need to check for empty manifest group here:
1857 # no need to check for empty manifest group here:
1856 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1858 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1857 # no new manifest will be created and the manifest group will
1859 # no new manifest will be created and the manifest group will
1858 # be empty during the pull
1860 # be empty during the pull
1859 self.manifest.addgroup(chunkiter, revmap, trp)
1861 self.manifest.addgroup(chunkiter, revmap, trp)
1860
1862
1861 # process the files
1863 # process the files
1862 self.ui.status(_("adding file changes\n"))
1864 self.ui.status(_("adding file changes\n"))
1863 while 1:
1865 while 1:
1864 f = changegroup.getchunk(source)
1866 f = changegroup.getchunk(source)
1865 if not f:
1867 if not f:
1866 break
1868 break
1867 self.ui.debug(_("adding %s revisions\n") % f)
1869 self.ui.debug(_("adding %s revisions\n") % f)
1868 fl = self.file(f)
1870 fl = self.file(f)
1869 o = fl.count()
1871 o = fl.count()
1870 chunkiter = changegroup.chunkiter(source)
1872 chunkiter = changegroup.chunkiter(source)
1871 if fl.addgroup(chunkiter, revmap, trp) is None:
1873 if fl.addgroup(chunkiter, revmap, trp) is None:
1872 raise util.Abort(_("received file revlog group is empty"))
1874 raise util.Abort(_("received file revlog group is empty"))
1873 revisions += fl.count() - o
1875 revisions += fl.count() - o
1874 files += 1
1876 files += 1
1875
1877
1876 # make changelog see real files again
1878 # make changelog see real files again
1877 cl.finalize(trp)
1879 cl.finalize(trp)
1878
1880
1879 newheads = len(self.changelog.heads())
1881 newheads = len(self.changelog.heads())
1880 heads = ""
1882 heads = ""
1881 if oldheads and newheads != oldheads:
1883 if oldheads and newheads != oldheads:
1882 heads = _(" (%+d heads)") % (newheads - oldheads)
1884 heads = _(" (%+d heads)") % (newheads - oldheads)
1883
1885
1884 self.ui.status(_("added %d changesets"
1886 self.ui.status(_("added %d changesets"
1885 " with %d changes to %d files%s\n")
1887 " with %d changes to %d files%s\n")
1886 % (changesets, revisions, files, heads))
1888 % (changesets, revisions, files, heads))
1887
1889
1888 if changesets > 0:
1890 if changesets > 0:
1889 self.hook('pretxnchangegroup', throw=True,
1891 self.hook('pretxnchangegroup', throw=True,
1890 node=hex(self.changelog.node(cor+1)), source=srctype,
1892 node=hex(self.changelog.node(cor+1)), source=srctype,
1891 url=url)
1893 url=url)
1892
1894
1893 tr.close()
1895 tr.close()
1894 finally:
1896 finally:
1895 del tr
1897 del tr
1896
1898
1897 if changesets > 0:
1899 if changesets > 0:
1898 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1900 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1899 source=srctype, url=url)
1901 source=srctype, url=url)
1900
1902
1901 for i in xrange(cor + 1, cnr + 1):
1903 for i in xrange(cor + 1, cnr + 1):
1902 self.hook("incoming", node=hex(self.changelog.node(i)),
1904 self.hook("incoming", node=hex(self.changelog.node(i)),
1903 source=srctype, url=url)
1905 source=srctype, url=url)
1904
1906
1905 # never return 0 here:
1907 # never return 0 here:
1906 if newheads < oldheads:
1908 if newheads < oldheads:
1907 return newheads - oldheads - 1
1909 return newheads - oldheads - 1
1908 else:
1910 else:
1909 return newheads - oldheads + 1
1911 return newheads - oldheads + 1
1910
1912
1911
1913
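For callers, the return-value convention documented in the docstring above can be decoded as below (a hypothetical helper, not part of the module):

    def heads_delta(ret):
        # decode addchangegroup()'s return value (assumed helper)
        if ret == 0:
            return "nothing changed or no source"
        if ret == 1:
            return "head count unchanged"
        if ret > 1:
            return "%d head(s) added" % (ret - 1)
        return "%d head(s) removed" % (-ret - 1)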
1912 def stream_in(self, remote):
1914 def stream_in(self, remote):
1913 fp = remote.stream_out()
1915 fp = remote.stream_out()
1914 l = fp.readline()
1916 l = fp.readline()
1915 try:
1917 try:
1916 resp = int(l)
1918 resp = int(l)
1917 except ValueError:
1919 except ValueError:
1918 raise util.UnexpectedOutput(
1920 raise util.UnexpectedOutput(
1919 _('Unexpected response from remote server:'), l)
1921 _('Unexpected response from remote server:'), l)
1920 if resp == 1:
1922 if resp == 1:
1921 raise util.Abort(_('operation forbidden by server'))
1923 raise util.Abort(_('operation forbidden by server'))
1922 elif resp == 2:
1924 elif resp == 2:
1923 raise util.Abort(_('locking the remote repository failed'))
1925 raise util.Abort(_('locking the remote repository failed'))
1924 elif resp != 0:
1926 elif resp != 0:
1925 raise util.Abort(_('the server sent an unknown error code'))
1927 raise util.Abort(_('the server sent an unknown error code'))
1926 self.ui.status(_('streaming all changes\n'))
1928 self.ui.status(_('streaming all changes\n'))
1927 l = fp.readline()
1929 l = fp.readline()
1928 try:
1930 try:
1929 total_files, total_bytes = map(int, l.split(' ', 1))
1931 total_files, total_bytes = map(int, l.split(' ', 1))
1930 except (ValueError, TypeError):
1932 except (ValueError, TypeError):
1931 raise util.UnexpectedOutput(
1933 raise util.UnexpectedOutput(
1932 _('Unexpected response from remote server:'), l)
1934 _('Unexpected response from remote server:'), l)
1933 self.ui.status(_('%d files to transfer, %s of data\n') %
1935 self.ui.status(_('%d files to transfer, %s of data\n') %
1934 (total_files, util.bytecount(total_bytes)))
1936 (total_files, util.bytecount(total_bytes)))
1935 start = time.time()
1937 start = time.time()
1936 for i in xrange(total_files):
1938 for i in xrange(total_files):
1937 # XXX doesn't support '\n' or '\r' in filenames
1939 # XXX doesn't support '\n' or '\r' in filenames
1938 l = fp.readline()
1940 l = fp.readline()
1939 try:
1941 try:
1940 name, size = l.split('\0', 1)
1942 name, size = l.split('\0', 1)
1941 size = int(size)
1943 size = int(size)
1942 except (ValueError, TypeError):
1944 except (ValueError, TypeError):
1943 raise util.UnexpectedOutput(
1945 raise util.UnexpectedOutput(
1944 _('Unexpected response from remote server:'), l)
1946 _('Unexpected response from remote server:'), l)
1945 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1947 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1946 ofp = self.sopener(name, 'w')
1948 ofp = self.sopener(name, 'w')
1947 for chunk in util.filechunkiter(fp, limit=size):
1949 for chunk in util.filechunkiter(fp, limit=size):
1948 ofp.write(chunk)
1950 ofp.write(chunk)
1949 ofp.close()
1951 ofp.close()
1950 elapsed = time.time() - start
1952 elapsed = time.time() - start
1951 if elapsed <= 0:
1953 if elapsed <= 0:
1952 elapsed = 0.001
1954 elapsed = 0.001
1953 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1955 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1954 (util.bytecount(total_bytes), elapsed,
1956 (util.bytecount(total_bytes), elapsed,
1955 util.bytecount(total_bytes / elapsed)))
1957 util.bytecount(total_bytes / elapsed)))
1956 self.invalidate()
1958 self.invalidate()
1957 return len(self.heads()) + 1
1959 return len(self.heads()) + 1
1958
1960
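For reference, a compact sketch of the stream format consumed above, assuming a blocking file-like fp (this mirrors the parsing in stream_in but is not module API): a numeric status line, a '<files> <bytes>' summary line, then per file a '<name>\0<size>' header followed by exactly <size> bytes of raw store data.

    def parse_stream(fp):
        if int(fp.readline()) != 0:              # nonzero = server error
            raise IOError("stream refused by server")
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        for i in xrange(total_files):
            name, size = fp.readline().split('\0', 1)
            yield name, fp.read(int(size))       # real code reads in chunks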
1959 def clone(self, remote, heads=[], stream=False):
1961 def clone(self, remote, heads=[], stream=False):
1960 '''clone remote repository.
1962 '''clone remote repository.
1961
1963
1962 keyword arguments:
1964 keyword arguments:
1963 heads: list of revs to clone (forces use of pull)
1965 heads: list of revs to clone (forces use of pull)
1964 stream: use streaming clone if possible'''
1966 stream: use streaming clone if possible'''
1965
1967
1966 # now, all clients that can request uncompressed clones can
1968 # now, all clients that can request uncompressed clones can
1967 # read repo formats supported by all servers that can serve
1969 # read repo formats supported by all servers that can serve
1968 # them.
1970 # them.
1969
1971
1970 # if revlog format changes, client will have to check version
1972 # if revlog format changes, client will have to check version
1971 # and format flags on "stream" capability, and use
1973 # and format flags on "stream" capability, and use
1972 # uncompressed only if compatible.
1974 # uncompressed only if compatible.
1973
1975
1974 if stream and not heads and remote.capable('stream'):
1976 if stream and not heads and remote.capable('stream'):
1975 return self.stream_in(remote)
1977 return self.stream_in(remote)
1976 return self.pull(remote, heads)
1978 return self.pull(remote, heads)
1977
1979
1978 # used to avoid circular references so destructors work
1980 # used to avoid circular references so destructors work
1979 def aftertrans(files):
1981 def aftertrans(files):
1980 renamefiles = [tuple(t) for t in files]
1982 renamefiles = [tuple(t) for t in files]
1981 def a():
1983 def a():
1982 for src, dest in renamefiles:
1984 for src, dest in renamefiles:
1983 util.rename(src, dest)
1985 util.rename(src, dest)
1984 return a
1986 return a
1985
1987
1986 def instance(ui, path, create):
1988 def instance(ui, path, create):
1987 return localrepository(ui, util.drop_scheme('file', path), create)
1989 return localrepository(ui, util.drop_scheme('file', path), create)
1988
1990
1989 def islocal(path):
1991 def islocal(path):
1990 return True
1992 return True
@@ -1,1274 +1,1276 b''
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import *
13 from node import *
14 from i18n import _
14 from i18n import _
15 import binascii, changegroup, errno, ancestor, mdiff, os
15 import binascii, changegroup, errno, ancestor, mdiff, os
16 import sha, struct, util, zlib
16 import sha, struct, util, zlib
17
17
18 _pack = struct.pack
18 _pack = struct.pack
19 _unpack = struct.unpack
19 _unpack = struct.unpack
20 _compress = zlib.compress
20 _compress = zlib.compress
21 _decompress = zlib.decompress
21 _decompress = zlib.decompress
22 _sha = sha.new
22 _sha = sha.new
23
23
24 # revlog flags
24 # revlog flags
25 REVLOGV0 = 0
25 REVLOGV0 = 0
26 REVLOGNG = 1
26 REVLOGNG = 1
27 REVLOGNGINLINEDATA = (1 << 16)
27 REVLOGNGINLINEDATA = (1 << 16)
28 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
28 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
29 REVLOG_DEFAULT_FORMAT = REVLOGNG
29 REVLOG_DEFAULT_FORMAT = REVLOGNG
30 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
30 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
31
31
32 class RevlogError(Exception):
32 class RevlogError(Exception):
33 pass
33 pass
34 class LookupError(RevlogError):
34 class LookupError(RevlogError):
35 pass
35 pass
36
36
37 def getoffset(q):
37 def getoffset(q):
38 return int(q >> 16)
38 return int(q >> 16)
39
39
40 def gettype(q):
40 def gettype(q):
41 return int(q & 0xFFFF)
41 return int(q & 0xFFFF)
42
42
43 def offset_type(offset, type):
43 def offset_type(offset, type):
44 return long(long(offset) << 16 | type)
44 return long(long(offset) << 16 | type)
45
45
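A quick check of the bit layout above: the low 16 bits carry the type flags and the remaining high bits the offset.

    q = offset_type(12345, 7)
    assert getoffset(q) == 12345
    assert gettype(q) == 7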
46 def hash(text, p1, p2):
46 def hash(text, p1, p2):
47 """generate a hash from the given text and its parent hashes
47 """generate a hash from the given text and its parent hashes
48
48
49 This hash combines both the current file contents and its history
49 This hash combines both the current file contents and its history
50 in a manner that makes it easy to distinguish nodes with the same
50 in a manner that makes it easy to distinguish nodes with the same
51 content in the revision graph.
51 content in the revision graph.
52 """
52 """
53 l = [p1, p2]
53 l = [p1, p2]
54 l.sort()
54 l.sort()
55 s = _sha(l[0])
55 s = _sha(l[0])
56 s.update(l[1])
56 s.update(l[1])
57 s.update(text)
57 s.update(text)
58 return s.digest()
58 return s.digest()
59
59
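For example, a root revision has nullid for both parents, so its nodeid reduces to sha1(nullid + nullid + text); a one-line sketch using the module's own hash():

    node = hash("file contents", nullid, nullid)
    assert len(node) == 20   # raw 20-byte SHA-1 digest, not hex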
60 def compress(text):
60 def compress(text):
61 """ generate a possibly-compressed representation of text """
61 """ generate a possibly-compressed representation of text """
62 if not text:
62 if not text:
63 return ("", text)
63 return ("", text)
64 if len(text) < 44:
64 if len(text) < 44:
65 if text[0] == '\0':
65 if text[0] == '\0':
66 return ("", text)
66 return ("", text)
67 return ('u', text)
67 return ('u', text)
68 bin = _compress(text)
68 bin = _compress(text)
69 if len(bin) > len(text):
69 if len(bin) > len(text):
70 if text[0] == '\0':
70 if text[0] == '\0':
71 return ("", text)
71 return ("", text)
72 return ('u', text)
72 return ('u', text)
73 return ("", bin)
73 return ("", bin)
74
74
75 def decompress(bin):
75 def decompress(bin):
76 """ decompress the given input """
76 """ decompress the given input """
77 if not bin:
77 if not bin:
78 return bin
78 return bin
79 t = bin[0]
79 t = bin[0]
80 if t == '\0':
80 if t == '\0':
81 return bin
81 return bin
82 if t == 'x':
82 if t == 'x':
83 return _decompress(bin)
83 return _decompress(bin)
84 if t == 'u':
84 if t == 'u':
85 return bin[1:]
85 return bin[1:]
86 raise RevlogError(_("unknown compression type %r") % t)
86 raise RevlogError(_("unknown compression type %r") % t)
87
87
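A round-trip sketch of the envelope implemented above: zlib output starts with 'x', short or incompressible text is stored behind a 'u' marker, and empty or NUL-leading text passes through untouched.

    assert compress("") == ("", "")
    assert compress("abc") == ('u', "abc")          # < 44 bytes, stored literal
    h, b = compress("y" * 200)                      # compressible, zlib stream
    assert decompress(h + b) == "y" * 200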
88 class lazyparser(object):
88 class lazyparser(object):
89 """
89 """
90 this class avoids the need to parse the entirety of large indices
90 this class avoids the need to parse the entirety of large indices
91 """
91 """
92
92
93 # lazyparser is not safe to use on windows if win32 extensions are not
93 # lazyparser is not safe to use on windows if win32 extensions are not
94 # available. it keeps a file handle open, which makes it impossible
94 # available. it keeps a file handle open, which makes it impossible
95 # to break hardlinks on local cloned repos.
95 # to break hardlinks on local cloned repos.
96 safe_to_use = os.name != 'nt' or (not util.is_win_9x() and
96 safe_to_use = os.name != 'nt' or (not util.is_win_9x() and
97 hasattr(util, 'win32api'))
97 hasattr(util, 'win32api'))
98
98
99 def __init__(self, dataf, size):
99 def __init__(self, dataf, size):
100 self.dataf = dataf
100 self.dataf = dataf
101 self.s = struct.calcsize(indexformatng)
101 self.s = struct.calcsize(indexformatng)
102 self.datasize = size
102 self.datasize = size
103 self.l = size/self.s
103 self.l = size/self.s
104 self.index = [None] * self.l
104 self.index = [None] * self.l
105 self.map = {nullid: nullrev}
105 self.map = {nullid: nullrev}
106 self.allmap = 0
106 self.allmap = 0
107 self.all = 0
107 self.all = 0
108 self.mapfind_count = 0
108 self.mapfind_count = 0
109
109
110 def loadmap(self):
110 def loadmap(self):
111 """
111 """
112 during a commit, we need to make sure the rev being added is
112 during a commit, we need to make sure the rev being added is
113 not a duplicate. This requires loading the entire index,
113 not a duplicate. This requires loading the entire index,
114 which is fairly slow. loadmap can load up just the node map,
114 which is fairly slow. loadmap can load up just the node map,
115 which takes much less time.
115 which takes much less time.
116 """
116 """
117 if self.allmap:
117 if self.allmap:
118 return
118 return
119 end = self.datasize
119 end = self.datasize
120 self.allmap = 1
120 self.allmap = 1
121 cur = 0
121 cur = 0
122 count = 0
122 count = 0
123 blocksize = self.s * 256
123 blocksize = self.s * 256
124 self.dataf.seek(0)
124 self.dataf.seek(0)
125 while cur < end:
125 while cur < end:
126 data = self.dataf.read(blocksize)
126 data = self.dataf.read(blocksize)
127 off = 0
127 off = 0
128 for x in xrange(256):
128 for x in xrange(256):
129 n = data[off + ngshaoffset:off + ngshaoffset + 20]
129 n = data[off + ngshaoffset:off + ngshaoffset + 20]
130 self.map[n] = count
130 self.map[n] = count
131 count += 1
131 count += 1
132 if count >= self.l:
132 if count >= self.l:
133 break
133 break
134 off += self.s
134 off += self.s
135 cur += blocksize
135 cur += blocksize
136
136
137 def loadblock(self, blockstart, blocksize, data=None):
137 def loadblock(self, blockstart, blocksize, data=None):
138 if self.all:
138 if self.all:
139 return
139 return
140 if data is None:
140 if data is None:
141 self.dataf.seek(blockstart)
141 self.dataf.seek(blockstart)
142 if blockstart + blocksize > self.datasize:
142 if blockstart + blocksize > self.datasize:
143 # the revlog may have grown since we've started running,
143 # the revlog may have grown since we've started running,
144 # but we don't have space in self.index for more entries.
144 # but we don't have space in self.index for more entries.
145 # limit blocksize so that we don't get too much data.
145 # limit blocksize so that we don't get too much data.
146 blocksize = max(self.datasize - blockstart, 0)
146 blocksize = max(self.datasize - blockstart, 0)
147 data = self.dataf.read(blocksize)
147 data = self.dataf.read(blocksize)
148 lend = len(data) / self.s
148 lend = len(data) / self.s
149 i = blockstart / self.s
149 i = blockstart / self.s
150 off = 0
150 off = 0
151 # lazyindex supports __delitem__
151 # lazyindex supports __delitem__
152 if lend > len(self.index) - i:
152 if lend > len(self.index) - i:
153 lend = len(self.index) - i
153 lend = len(self.index) - i
154 for x in xrange(lend):
154 for x in xrange(lend):
155 if self.index[i + x] == None:
155 if self.index[i + x] == None:
156 b = data[off : off + self.s]
156 b = data[off : off + self.s]
157 self.index[i + x] = b
157 self.index[i + x] = b
158 n = b[ngshaoffset:ngshaoffset + 20]
158 n = b[ngshaoffset:ngshaoffset + 20]
159 self.map[n] = i + x
159 self.map[n] = i + x
160 off += self.s
160 off += self.s
161
161
162 def findnode(self, node):
162 def findnode(self, node):
163 """search backwards through the index file for a specific node"""
163 """search backwards through the index file for a specific node"""
164 if self.allmap:
164 if self.allmap:
165 return None
165 return None
166
166
167 # hg log will cause many many searches for the manifest
167 # hg log will cause many many searches for the manifest
168 # nodes. After we get called a few times, just load the whole
168 # nodes. After we get called a few times, just load the whole
169 # thing.
169 # thing.
170 if self.mapfind_count > 8:
170 if self.mapfind_count > 8:
171 self.loadmap()
171 self.loadmap()
172 if node in self.map:
172 if node in self.map:
173 return node
173 return node
174 return None
174 return None
175 self.mapfind_count += 1
175 self.mapfind_count += 1
176 last = self.l - 1
176 last = self.l - 1
177 while self.index[last] != None:
177 while self.index[last] != None:
178 if last == 0:
178 if last == 0:
179 self.all = 1
179 self.all = 1
180 self.allmap = 1
180 self.allmap = 1
181 return None
181 return None
182 last -= 1
182 last -= 1
183 end = (last + 1) * self.s
183 end = (last + 1) * self.s
184 blocksize = self.s * 256
184 blocksize = self.s * 256
185 while end >= 0:
185 while end >= 0:
186 start = max(end - blocksize, 0)
186 start = max(end - blocksize, 0)
187 self.dataf.seek(start)
187 self.dataf.seek(start)
188 data = self.dataf.read(end - start)
188 data = self.dataf.read(end - start)
189 findend = end - start
189 findend = end - start
190 while True:
190 while True:
191 # we're searching backwards, so we have to make sure
191 # we're searching backwards, so we have to make sure
192 # we don't find a changeset where this node is a parent
192 # we don't find a changeset where this node is a parent
193 off = data.find(node, 0, findend)
193 off = data.find(node, 0, findend)
194 findend = off
194 findend = off
195 if off >= 0:
195 if off >= 0:
196 i = off / self.s
196 i = off / self.s
197 off = i * self.s
197 off = i * self.s
198 n = data[off + ngshaoffset:off + ngshaoffset + 20]
198 n = data[off + ngshaoffset:off + ngshaoffset + 20]
199 if n == node:
199 if n == node:
200 self.map[n] = i + start / self.s
200 self.map[n] = i + start / self.s
201 return node
201 return node
202 else:
202 else:
203 break
203 break
204 end -= blocksize
204 end -= blocksize
205 return None
205 return None
206
206
207 def loadindex(self, i=None, end=None):
207 def loadindex(self, i=None, end=None):
208 if self.all:
208 if self.all:
209 return
209 return
210 all = False
210 all = False
211 if i == None:
211 if i == None:
212 blockstart = 0
212 blockstart = 0
213 blocksize = (65536 / self.s) * self.s
213 blocksize = (65536 / self.s) * self.s
214 end = self.datasize
214 end = self.datasize
215 all = True
215 all = True
216 else:
216 else:
217 if end:
217 if end:
218 blockstart = i * self.s
218 blockstart = i * self.s
219 end = end * self.s
219 end = end * self.s
220 blocksize = end - blockstart
220 blocksize = end - blockstart
221 else:
221 else:
222 blockstart = (i & ~1023) * self.s
222 blockstart = (i & ~1023) * self.s
223 blocksize = self.s * 1024
223 blocksize = self.s * 1024
224 end = blockstart + blocksize
224 end = blockstart + blocksize
225 while blockstart < end:
225 while blockstart < end:
226 self.loadblock(blockstart, blocksize)
226 self.loadblock(blockstart, blocksize)
227 blockstart += blocksize
227 blockstart += blocksize
228 if all:
228 if all:
229 self.all = True
229 self.all = True
230
230
231 class lazyindex(object):
231 class lazyindex(object):
232 """a lazy version of the index array"""
232 """a lazy version of the index array"""
233 def __init__(self, parser):
233 def __init__(self, parser):
234 self.p = parser
234 self.p = parser
235 def __len__(self):
235 def __len__(self):
236 return len(self.p.index)
236 return len(self.p.index)
237 def load(self, pos):
237 def load(self, pos):
238 if pos < 0:
238 if pos < 0:
239 pos += len(self.p.index)
239 pos += len(self.p.index)
240 self.p.loadindex(pos)
240 self.p.loadindex(pos)
241 return self.p.index[pos]
241 return self.p.index[pos]
242 def __getitem__(self, pos):
242 def __getitem__(self, pos):
243 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
243 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
244 def __setitem__(self, pos, item):
244 def __setitem__(self, pos, item):
245 self.p.index[pos] = _pack(indexformatng, *item)
245 self.p.index[pos] = _pack(indexformatng, *item)
246 def __delitem__(self, pos):
246 def __delitem__(self, pos):
247 del self.p.index[pos]
247 del self.p.index[pos]
248 def insert(self, pos, e):
248 def insert(self, pos, e):
249 self.p.index.insert(pos, _pack(indexformatng, *e))
249 self.p.index.insert(pos, _pack(indexformatng, *e))
250 def append(self, e):
250 def append(self, e):
251 self.p.index.append(_pack(indexformatng, *e))
251 self.p.index.append(_pack(indexformatng, *e))
252
252
253 class lazymap(object):
253 class lazymap(object):
254 """a lazy version of the node map"""
254 """a lazy version of the node map"""
255 def __init__(self, parser):
255 def __init__(self, parser):
256 self.p = parser
256 self.p = parser
257 def load(self, key):
257 def load(self, key):
258 n = self.p.findnode(key)
258 n = self.p.findnode(key)
259 if n == None:
259 if n == None:
260 raise KeyError(key)
260 raise KeyError(key)
261 def __contains__(self, key):
261 def __contains__(self, key):
262 if key in self.p.map:
262 if key in self.p.map:
263 return True
263 return True
264 self.p.loadmap()
264 self.p.loadmap()
265 return key in self.p.map
265 return key in self.p.map
266 def __iter__(self):
266 def __iter__(self):
267 yield nullid
267 yield nullid
268 for i in xrange(self.p.l):
268 for i in xrange(self.p.l):
269 ret = self.p.index[i]
269 ret = self.p.index[i]
270 if not ret:
270 if not ret:
271 self.p.loadindex(i)
271 self.p.loadindex(i)
272 ret = self.p.index[i]
272 ret = self.p.index[i]
273 if isinstance(ret, str):
273 if isinstance(ret, str):
274 ret = _unpack(indexformatng, ret)
274 ret = _unpack(indexformatng, ret)
275 yield ret[7]
275 yield ret[7]
276 def __getitem__(self, key):
276 def __getitem__(self, key):
277 try:
277 try:
278 return self.p.map[key]
278 return self.p.map[key]
279 except KeyError:
279 except KeyError:
280 try:
280 try:
281 self.load(key)
281 self.load(key)
282 return self.p.map[key]
282 return self.p.map[key]
283 except KeyError:
283 except KeyError:
284 raise KeyError("node " + hex(key))
284 raise KeyError("node " + hex(key))
285 def __setitem__(self, key, val):
285 def __setitem__(self, key, val):
286 self.p.map[key] = val
286 self.p.map[key] = val
287 def __delitem__(self, key):
287 def __delitem__(self, key):
288 del self.p.map[key]
288 del self.p.map[key]
289
289
290 indexformatv0 = ">4l20s20s20s"
290 indexformatv0 = ">4l20s20s20s"
291 v0shaoffset = 56
291 v0shaoffset = 56
292
292
293 class revlogoldio(object):
293 class revlogoldio(object):
294 def __init__(self):
294 def __init__(self):
295 self.size = struct.calcsize(indexformatv0)
295 self.size = struct.calcsize(indexformatv0)
296
296
297 def parseindex(self, fp, inline):
297 def parseindex(self, fp, inline):
298 s = self.size
298 s = self.size
299 index = []
299 index = []
300 nodemap = {nullid: nullrev}
300 nodemap = {nullid: nullrev}
301 n = off = 0
301 n = off = 0
302 data = fp.read()
302 data = fp.read()
303 l = len(data)
303 l = len(data)
304 while off + s <= l:
304 while off + s <= l:
305 cur = data[off:off + s]
305 cur = data[off:off + s]
306 off += s
306 off += s
307 e = _unpack(indexformatv0, cur)
307 e = _unpack(indexformatv0, cur)
308 # transform to revlogv1 format
308 # transform to revlogv1 format
309 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
309 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
310 nodemap[e[4]], nodemap[e[5]], e[6])
310 nodemap[e[4]], nodemap[e[5]], e[6])
311 index.append(e2)
311 index.append(e2)
312 nodemap[e[6]] = n
312 nodemap[e[6]] = n
313 n += 1
313 n += 1
314
314
315 return index, nodemap, None
315 return index, nodemap, None
316
316
317 def packentry(self, entry, node, version, rev):
317 def packentry(self, entry, node, version, rev):
318 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
318 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
319 node(entry[5]), node(entry[6]), entry[7])
319 node(entry[5]), node(entry[6]), entry[7])
320 return _pack(indexformatv0, *e2)
320 return _pack(indexformatv0, *e2)
321
321
322 # index ng:
322 # index ng:
323 # 6 bytes offset
323 # 6 bytes offset
324 # 2 bytes flags
324 # 2 bytes flags
325 # 4 bytes compressed length
325 # 4 bytes compressed length
326 # 4 bytes uncompressed length
326 # 4 bytes uncompressed length
327 # 4 bytes: base rev
327 # 4 bytes: base rev
328 # 4 bytes link rev
328 # 4 bytes link rev
329 # 4 bytes parent 1 rev
329 # 4 bytes parent 1 rev
330 # 4 bytes parent 2 rev
330 # 4 bytes parent 2 rev
331 # 32 bytes: nodeid
331 # 32 bytes: nodeid
332 indexformatng = ">Qiiiiii20s12x"
332 indexformatng = ">Qiiiiii20s12x"
333 ngshaoffset = 32
333 ngshaoffset = 32
334 versionformat = ">I"
334 versionformat = ">I"
335
335
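To make the record layout above concrete, a pack/unpack round trip with indexformatng (field values here are made up for illustration):

    entry = (offset_type(0, 0), 120, 250, 4, 4, 3, -1, "\x00" * 20)
    packed = _pack(indexformatng, *entry)
    assert len(packed) == 64                # 8 + 6*4 + 20 + 12 bytes
    assert _unpack(indexformatng, packed) == entry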
336 class revlogio(object):
336 class revlogio(object):
337 def __init__(self):
337 def __init__(self):
338 self.size = struct.calcsize(indexformatng)
338 self.size = struct.calcsize(indexformatng)
339
339
340 def parseindex(self, fp, inline):
340 def parseindex(self, fp, inline):
341 try:
341 try:
342 size = util.fstat(fp).st_size
342 size = util.fstat(fp).st_size
343 except AttributeError:
343 except AttributeError:
344 size = 0
344 size = 0
345
345
346 if lazyparser.safe_to_use and not inline and size > 1000000:
346 if lazyparser.safe_to_use and not inline and size > 1000000:
347 # big index, let's parse it on demand
347 # big index, let's parse it on demand
348 parser = lazyparser(fp, size)
348 parser = lazyparser(fp, size)
349 index = lazyindex(parser)
349 index = lazyindex(parser)
350 nodemap = lazymap(parser)
350 nodemap = lazymap(parser)
351 e = list(index[0])
351 e = list(index[0])
352 type = gettype(e[0])
352 type = gettype(e[0])
353 e[0] = offset_type(0, type)
353 e[0] = offset_type(0, type)
354 index[0] = e
354 index[0] = e
355 return index, nodemap, None
355 return index, nodemap, None
356
356
357 s = self.size
357 s = self.size
358 cache = None
358 cache = None
359 index = []
359 index = []
360 nodemap = {nullid: nullrev}
360 nodemap = {nullid: nullrev}
361 n = off = 0
361 n = off = 0
362 # if we're not using lazymap, always read the whole index
362 # if we're not using lazymap, always read the whole index
363 data = fp.read()
363 data = fp.read()
364 l = len(data) - s
364 l = len(data) - s
365 append = index.append
365 append = index.append
366 if inline:
366 if inline:
367 cache = (0, data)
367 cache = (0, data)
368 while off <= l:
368 while off <= l:
369 e = _unpack(indexformatng, data[off:off + s])
369 e = _unpack(indexformatng, data[off:off + s])
370 nodemap[e[7]] = n
370 nodemap[e[7]] = n
371 append(e)
371 append(e)
372 n += 1
372 n += 1
373 if e[1] < 0:
373 if e[1] < 0:
374 break
374 break
375 off += e[1] + s
375 off += e[1] + s
376 else:
376 else:
377 while off <= l:
377 while off <= l:
378 e = _unpack(indexformatng, data[off:off + s])
378 e = _unpack(indexformatng, data[off:off + s])
379 nodemap[e[7]] = n
379 nodemap[e[7]] = n
380 append(e)
380 append(e)
381 n += 1
381 n += 1
382 off += s
382 off += s
383
383
384 e = list(index[0])
384 e = list(index[0])
385 type = gettype(e[0])
385 type = gettype(e[0])
386 e[0] = offset_type(0, type)
386 e[0] = offset_type(0, type)
387 index[0] = e
387 index[0] = e
388
388
389 return index, nodemap, cache
389 return index, nodemap, cache
390
390
391 def packentry(self, entry, node, version, rev):
391 def packentry(self, entry, node, version, rev):
392 p = _pack(indexformatng, *entry)
392 p = _pack(indexformatng, *entry)
393 if rev == 0:
393 if rev == 0:
394 p = _pack(versionformat, version) + p[4:]
394 p = _pack(versionformat, version) + p[4:]
395 return p
395 return p
396
396
397 class revlog(object):
397 class revlog(object):
398 """
398 """
399 the underlying revision storage object
399 the underlying revision storage object
400
400
401 A revlog consists of two parts, an index and the revision data.
401 A revlog consists of two parts, an index and the revision data.
402
402
403 The index is a file with a fixed record size containing
403 The index is a file with a fixed record size containing
404 information on each revision, including its nodeid (hash), the
404 information on each revision, including its nodeid (hash), the
405 nodeids of its parents, the position and offset of its data within
405 nodeids of its parents, the position and offset of its data within
406 the data file, and the revision it's based on. Finally, each entry
406 the data file, and the revision it's based on. Finally, each entry
407 contains a linkrev entry that can serve as a pointer to external
407 contains a linkrev entry that can serve as a pointer to external
408 data.
408 data.
409
409
410 The revision data itself is a linear collection of data chunks.
410 The revision data itself is a linear collection of data chunks.
411 Each chunk represents a revision and is usually represented as a
411 Each chunk represents a revision and is usually represented as a
412 delta against the previous chunk. To bound lookup time, runs of
412 delta against the previous chunk. To bound lookup time, runs of
413 deltas are limited to about 2 times the length of the original
413 deltas are limited to about 2 times the length of the original
414 version data. This makes retrieval of a version proportional to
414 version data. This makes retrieval of a version proportional to
415 its size, or O(1) relative to the number of revisions.
415 its size, or O(1) relative to the number of revisions.
416
416
417 Both pieces of the revlog are written to in an append-only
417 Both pieces of the revlog are written to in an append-only
418 fashion, which means we never need to rewrite a file to insert or
418 fashion, which means we never need to rewrite a file to insert or
419 remove data, and can use some simple techniques to avoid the need
419 remove data, and can use some simple techniques to avoid the need
420 for locking while reading.
420 for locking while reading.
421 """
421 """
422 def __init__(self, opener, indexfile):
422 def __init__(self, opener, indexfile):
423 """
423 """
424 create a revlog object
424 create a revlog object
425
425
426 opener is a function that abstracts the file opening operation
426 opener is a function that abstracts the file opening operation
427 and can be used to implement COW semantics or the like.
427 and can be used to implement COW semantics or the like.
428 """
428 """
429 self.indexfile = indexfile
429 self.indexfile = indexfile
430 self.datafile = indexfile[:-2] + ".d"
430 self.datafile = indexfile[:-2] + ".d"
431 self.opener = opener
431 self.opener = opener
432 self._cache = None
432 self._cache = None
433 self._chunkcache = None
433 self._chunkcache = None
434 self.nodemap = {nullid: nullrev}
434 self.nodemap = {nullid: nullrev}
435 self.index = []
435 self.index = []
436
436
437 v = REVLOG_DEFAULT_VERSION
437 v = REVLOG_DEFAULT_VERSION
438 if hasattr(opener, "defversion"):
438 if hasattr(opener, "defversion"):
439 v = opener.defversion
439 v = opener.defversion
440 if v & REVLOGNG:
440 if v & REVLOGNG:
441 v |= REVLOGNGINLINEDATA
441 v |= REVLOGNGINLINEDATA
442
442
443 i = ""
443 i = ""
444 try:
444 try:
445 f = self.opener(self.indexfile)
445 f = self.opener(self.indexfile)
446 i = f.read(4)
446 i = f.read(4)
447 f.seek(0)
447 f.seek(0)
448 if len(i) > 0:
448 if len(i) > 0:
449 v = struct.unpack(versionformat, i)[0]
449 v = struct.unpack(versionformat, i)[0]
450 except IOError, inst:
450 except IOError, inst:
451 if inst.errno != errno.ENOENT:
451 if inst.errno != errno.ENOENT:
452 raise
452 raise
453
453
454 self.version = v
454 self.version = v
455 self._inline = v & REVLOGNGINLINEDATA
455 self._inline = v & REVLOGNGINLINEDATA
456 flags = v & ~0xFFFF
456 flags = v & ~0xFFFF
457 fmt = v & 0xFFFF
457 fmt = v & 0xFFFF
458 if fmt == REVLOGV0 and flags:
458 if fmt == REVLOGV0 and flags:
459 raise RevlogError(_("index %s unknown flags %#04x for format v0")
459 raise RevlogError(_("index %s unknown flags %#04x for format v0")
460 % (self.indexfile, flags >> 16))
460 % (self.indexfile, flags >> 16))
461 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
461 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
462 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
462 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
463 % (self.indexfile, flags >> 16))
463 % (self.indexfile, flags >> 16))
464 elif fmt > REVLOGNG:
464 elif fmt > REVLOGNG:
465 raise RevlogError(_("index %s unknown format %d")
465 raise RevlogError(_("index %s unknown format %d")
466 % (self.indexfile, fmt))
466 % (self.indexfile, fmt))
467
467
468 self._io = revlogio()
468 self._io = revlogio()
469 if self.version == REVLOGV0:
469 if self.version == REVLOGV0:
470 self._io = revlogoldio()
470 self._io = revlogoldio()
471 if i:
471 if i:
472 d = self._io.parseindex(f, self._inline)
472 d = self._io.parseindex(f, self._inline)
473 self.index, self.nodemap, self._chunkcache = d
473 self.index, self.nodemap, self._chunkcache = d
474
474
475 # add the magic null revision at -1
475 # add the magic null revision at -1
476 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
476 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
477
477
478 def _loadindex(self, start, end):
478 def _loadindex(self, start, end):
479 """load a block of indexes all at once from the lazy parser"""
479 """load a block of indexes all at once from the lazy parser"""
480 if isinstance(self.index, lazyindex):
480 if isinstance(self.index, lazyindex):
481 self.index.p.loadindex(start, end)
481 self.index.p.loadindex(start, end)
482
482
483 def _loadindexmap(self):
483 def _loadindexmap(self):
484 """loads both the map and the index from the lazy parser"""
484 """loads both the map and the index from the lazy parser"""
485 if isinstance(self.index, lazyindex):
485 if isinstance(self.index, lazyindex):
486 p = self.index.p
486 p = self.index.p
487 p.loadindex()
487 p.loadindex()
488 self.nodemap = p.map
488 self.nodemap = p.map
489
489
490 def _loadmap(self):
490 def _loadmap(self):
491 """loads the map from the lazy parser"""
491 """loads the map from the lazy parser"""
492 if isinstance(self.nodemap, lazymap):
492 if isinstance(self.nodemap, lazymap):
493 self.nodemap.p.loadmap()
493 self.nodemap.p.loadmap()
494 self.nodemap = self.nodemap.p.map
494 self.nodemap = self.nodemap.p.map
495
495
496 def tip(self):
496 def tip(self):
497 return self.node(len(self.index) - 2)
497 return self.node(len(self.index) - 2)
498 def count(self):
498 def count(self):
499 return len(self.index) - 1
499 return len(self.index) - 1
500
500
501 def rev(self, node):
501 def rev(self, node):
502 try:
502 try:
503 return self.nodemap[node]
503 return self.nodemap[node]
504 except KeyError:
504 except KeyError:
505 raise LookupError(_('%s: no node %s') % (self.indexfile, hex(node)))
505 raise LookupError(_('%s: no node %s') % (self.indexfile, hex(node)))
506 def node(self, rev):
506 def node(self, rev):
507 return self.index[rev][7]
507 return self.index[rev][7]
508 def linkrev(self, node):
508 def linkrev(self, node):
509 return self.index[self.rev(node)][4]
509 return self.index[self.rev(node)][4]
510 def parents(self, node):
510 def parents(self, node):
511 d = self.index[self.rev(node)][5:7]
511 d = self.index[self.rev(node)][5:7]
512 return (self.node(d[0]), self.node(d[1]))
512 return (self.node(d[0]), self.node(d[1]))
513 def parentrevs(self, rev):
513 def parentrevs(self, rev):
514 return self.index[rev][5:7]
514 return self.index[rev][5:7]
515 def start(self, rev):
515 def start(self, rev):
516 return int(self.index[rev][0] >> 16)
516 return int(self.index[rev][0] >> 16)
517 def end(self, rev):
517 def end(self, rev):
518 return self.start(rev) + self.length(rev)
518 return self.start(rev) + self.length(rev)
519 def length(self, rev):
519 def length(self, rev):
520 return self.index[rev][1]
520 return self.index[rev][1]
521 def base(self, rev):
521 def base(self, rev):
522 return self.index[rev][3]
522 return self.index[rev][3]
523
523
524 def size(self, rev):
524 def size(self, rev):
525 """return the length of the uncompressed text for a given revision"""
525 """return the length of the uncompressed text for a given revision"""
526 l = self.index[rev][2]
526 l = self.index[rev][2]
527 if l >= 0:
527 if l >= 0:
528 return l
528 return l
529
529
530 t = self.revision(self.node(rev))
530 t = self.revision(self.node(rev))
531 return len(t)
531 return len(t)
532
532
533 # alternate implementation. The advantage of this code is that it
533 # alternate implementation. The advantage of this code is that it
534 # will be faster for a single revision. But the results are not
534 # will be faster for a single revision. But the results are not
535 # cached, so finding the size of every revision will be slower.
535 # cached, so finding the size of every revision will be slower.
536 """
536 """
537 if self.cache and self.cache[1] == rev:
537 if self.cache and self.cache[1] == rev:
538 return len(self.cache[2])
538 return len(self.cache[2])
539
539
540 base = self.base(rev)
540 base = self.base(rev)
541 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
541 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
542 base = self.cache[1]
542 base = self.cache[1]
543 text = self.cache[2]
543 text = self.cache[2]
544 else:
544 else:
545 text = self.revision(self.node(base))
545 text = self.revision(self.node(base))
546
546
547 l = len(text)
547 l = len(text)
548 for x in xrange(base + 1, rev + 1):
548 for x in xrange(base + 1, rev + 1):
549 l = mdiff.patchedsize(l, self.chunk(x))
549 l = mdiff.patchedsize(l, self.chunk(x))
550 return l
550 return l
551 """
551 """
552
552
553 def reachable(self, node, stop=None):
553 def reachable(self, node, stop=None):
554 """return a hash of all nodes ancestral to a given node, including
554 """return a hash of all nodes ancestral to a given node, including
555 the node itself, stopping when stop is matched"""
555 the node itself, stopping when stop is matched"""
556 reachable = {}
556 reachable = {}
557 visit = [node]
557 visit = [node]
558 reachable[node] = 1
558 reachable[node] = 1
559 if stop:
559 if stop:
560 stopn = self.rev(stop)
560 stopn = self.rev(stop)
561 else:
561 else:
562 stopn = 0
562 stopn = 0
563 while visit:
563 while visit:
564 n = visit.pop(0)
564 n = visit.pop(0)
565 if n == stop:
565 if n == stop:
566 continue
566 continue
567 if n == nullid:
567 if n == nullid:
568 continue
568 continue
569 for p in self.parents(n):
569 for p in self.parents(n):
570 if self.rev(p) < stopn:
570 if self.rev(p) < stopn:
571 continue
571 continue
572 if p not in reachable:
572 if p not in reachable:
573 reachable[p] = 1
573 reachable[p] = 1
574 visit.append(p)
574 visit.append(p)
575 return reachable
575 return reachable
576
576
577 def nodesbetween(self, roots=None, heads=None):
577 def nodesbetween(self, roots=None, heads=None):
578 """Return a tuple containing three elements. Elements 1 and 2 contain
578 """Return a tuple containing three elements. Elements 1 and 2 contain
579 a final list of bases and heads after all the unreachable ones have been
579 a final list of bases and heads after all the unreachable ones have been
580 pruned. Element 0 contains a topologically sorted list of all
580 pruned. Element 0 contains a topologically sorted list of all
581
581
582 nodes that satisfy these constraints:
582 nodes that satisfy these constraints:
583 1. All nodes must be descended from a node in roots (the nodes on
583 1. All nodes must be descended from a node in roots (the nodes on
584 roots are considered descended from themselves).
584 roots are considered descended from themselves).
585 2. All nodes must also be ancestors of a node in heads (the nodes in
585 2. All nodes must also be ancestors of a node in heads (the nodes in
586 heads are considered to be their own ancestors).
586 heads are considered to be their own ancestors).
587
587
588 If roots is unspecified, nullid is assumed as the only root.
588 If roots is unspecified, nullid is assumed as the only root.
589 If heads is unspecified, it is taken to be the output of the
589 If heads is unspecified, it is taken to be the output of the
590 heads method (i.e. a list of all nodes in the repository that
590 heads method (i.e. a list of all nodes in the repository that
591 have no children)."""
591 have no children)."""
592 nonodes = ([], [], [])
592 nonodes = ([], [], [])
593 if roots is not None:
593 if roots is not None:
594 roots = list(roots)
594 roots = list(roots)
595 if not roots:
595 if not roots:
596 return nonodes
596 return nonodes
597 lowestrev = min([self.rev(n) for n in roots])
597 lowestrev = min([self.rev(n) for n in roots])
598 else:
598 else:
599 roots = [nullid] # Everybody's a descendent of nullid
599 roots = [nullid] # Everybody's a descendent of nullid
600 lowestrev = nullrev
600 lowestrev = nullrev
601 if (lowestrev == nullrev) and (heads is None):
601 if (lowestrev == nullrev) and (heads is None):
602 # We want _all_ the nodes!
602 # We want _all_ the nodes!
603 return ([self.node(r) for r in xrange(0, self.count())],
603 return ([self.node(r) for r in xrange(0, self.count())],
604 [nullid], list(self.heads()))
604 [nullid], list(self.heads()))
605 if heads is None:
605 if heads is None:
606 # All nodes are ancestors, so the latest ancestor is the last
606 # All nodes are ancestors, so the latest ancestor is the last
607 # node.
607 # node.
608 highestrev = self.count() - 1
608 highestrev = self.count() - 1
609 # Set ancestors to None to signal that every node is an ancestor.
609 # Set ancestors to None to signal that every node is an ancestor.
610 ancestors = None
610 ancestors = None
611 # Set heads to an empty dictionary for later discovery of heads
611 # Set heads to an empty dictionary for later discovery of heads
612 heads = {}
612 heads = {}
613 else:
613 else:
614 heads = list(heads)
614 heads = list(heads)
615 if not heads:
615 if not heads:
616 return nonodes
616 return nonodes
617 ancestors = {}
617 ancestors = {}
618 # Turn heads into a dictionary so we can remove 'fake' heads.
618 # Turn heads into a dictionary so we can remove 'fake' heads.
619 # Also, later we will be using it to filter out the heads we can't
619 # Also, later we will be using it to filter out the heads we can't
620 # find from roots.
620 # find from roots.
621 heads = dict.fromkeys(heads, 0)
621 heads = dict.fromkeys(heads, 0)
622 # Start at the top and keep marking parents until we're done.
622 # Start at the top and keep marking parents until we're done.
623 nodestotag = heads.keys()
623 nodestotag = heads.keys()
624 # Remember where the top was so we can use it as a limit later.
624 # Remember where the top was so we can use it as a limit later.
625 highestrev = max([self.rev(n) for n in nodestotag])
625 highestrev = max([self.rev(n) for n in nodestotag])
626 while nodestotag:
626 while nodestotag:
627 # grab a node to tag
627 # grab a node to tag
628 n = nodestotag.pop()
628 n = nodestotag.pop()
629 # Never tag nullid
629 # Never tag nullid
630 if n == nullid:
630 if n == nullid:
631 continue
631 continue
632 # A node's revision number represents its place in a
632 # A node's revision number represents its place in a
633 # topologically sorted list of nodes.
633 # topologically sorted list of nodes.
634 r = self.rev(n)
634 r = self.rev(n)
635 if r >= lowestrev:
635 if r >= lowestrev:
636 if n not in ancestors:
636 if n not in ancestors:
637 # If we are possibly a descendent of one of the roots
637 # If we are possibly a descendent of one of the roots
638 # and we haven't already been marked as an ancestor
638 # and we haven't already been marked as an ancestor
639 ancestors[n] = 1 # Mark as ancestor
639 ancestors[n] = 1 # Mark as ancestor
640 # Add non-nullid parents to list of nodes to tag.
640 # Add non-nullid parents to list of nodes to tag.
641 nodestotag.extend([p for p in self.parents(n) if
641 nodestotag.extend([p for p in self.parents(n) if
642 p != nullid])
642 p != nullid])
643 elif n in heads: # We've seen it before, is it a fake head?
643 elif n in heads: # We've seen it before, is it a fake head?
644 # So it is, real heads should not be the ancestors of
644 # So it is, real heads should not be the ancestors of
645 # any other heads.
645 # any other heads.
646 heads.pop(n)
646 heads.pop(n)
647 if not ancestors:
647 if not ancestors:
648 return nonodes
648 return nonodes
649 # Now that we have our set of ancestors, we want to remove any
649 # Now that we have our set of ancestors, we want to remove any
650 # roots that are not ancestors.
650 # roots that are not ancestors.
651
651
652 # If one of the roots was nullid, everything is included anyway.
652 # If one of the roots was nullid, everything is included anyway.
653 if lowestrev > nullrev:
653 if lowestrev > nullrev:
654 # But, since we weren't, let's recompute the lowest rev to not
654 # But, since we weren't, let's recompute the lowest rev to not
655 # include roots that aren't ancestors.
655 # include roots that aren't ancestors.
656
656
657 # Filter out roots that aren't ancestors of heads
657 # Filter out roots that aren't ancestors of heads
658 roots = [n for n in roots if n in ancestors]
658 roots = [n for n in roots if n in ancestors]
659 # Recompute the lowest revision
659 # Recompute the lowest revision
660 if roots:
660 if roots:
661 lowestrev = min([self.rev(n) for n in roots])
661 lowestrev = min([self.rev(n) for n in roots])
662 else:
662 else:
663 # No more roots? Return empty list
663 # No more roots? Return empty list
664 return nonodes
664 return nonodes
665 else:
665 else:
666 # We are descending from nullid, and don't need to care about
666 # We are descending from nullid, and don't need to care about
667 # any other roots.
667 # any other roots.
668 lowestrev = nullrev
668 lowestrev = nullrev
669 roots = [nullid]
669 roots = [nullid]
670 # Transform our roots list into a 'set' (i.e. a dictionary where the
670 # Transform our roots list into a 'set' (i.e. a dictionary where the
671 # values don't matter).
671 # values don't matter).
672 descendents = dict.fromkeys(roots, 1)
672 descendents = dict.fromkeys(roots, 1)
673 # Also, keep the original roots so we can filter out roots that aren't
673 # Also, keep the original roots so we can filter out roots that aren't
674 # 'real' roots (i.e. are descended from other roots).
674 # 'real' roots (i.e. are descended from other roots).
675 roots = descendents.copy()
675 roots = descendents.copy()
676 # Our topologically sorted list of output nodes.
676 # Our topologically sorted list of output nodes.
677 orderedout = []
677 orderedout = []
678 # Don't start at nullid since we don't want nullid in our output list,
678 # Don't start at nullid since we don't want nullid in our output list,
679 # and if nullid shows up in descendents, empty parents will look like
679 # and if nullid shows up in descendents, empty parents will look like
680 # they're descendents.
680 # they're descendents.
681 for r in xrange(max(lowestrev, 0), highestrev + 1):
681 for r in xrange(max(lowestrev, 0), highestrev + 1):
682 n = self.node(r)
682 n = self.node(r)
683 isdescendent = False
683 isdescendent = False
684 if lowestrev == nullrev: # Everybody is a descendent of nullid
684 if lowestrev == nullrev: # Everybody is a descendent of nullid
685 isdescendent = True
685 isdescendent = True
686 elif n in descendents:
686 elif n in descendents:
687 # n is already a descendent
687 # n is already a descendent
688 isdescendent = True
688 isdescendent = True
689 # This check only needs to be done here because all the roots
689 # This check only needs to be done here because all the roots
690 # will start being marked is descendents before the loop.
690 # will start being marked is descendents before the loop.
691 if n in roots:
691 if n in roots:
692 # If n was a root, check if it's a 'real' root.
692 # If n was a root, check if it's a 'real' root.
693 p = tuple(self.parents(n))
693 p = tuple(self.parents(n))
694 # If any of its parents are descendents, it's not a root.
694 # If any of its parents are descendents, it's not a root.
695 if (p[0] in descendents) or (p[1] in descendents):
695 if (p[0] in descendents) or (p[1] in descendents):
696 roots.pop(n)
696 roots.pop(n)
697 else:
697 else:
698 p = tuple(self.parents(n))
698 p = tuple(self.parents(n))
699 # A node is a descendent if either of its parents are
699 # A node is a descendent if either of its parents are
700 # descendents. (We seeded the dependents list with the roots
700 # descendents. (We seeded the dependents list with the roots
701 # up there, remember?)
701 # up there, remember?)
702 if (p[0] in descendents) or (p[1] in descendents):
702 if (p[0] in descendents) or (p[1] in descendents):
703 descendents[n] = 1
703 descendents[n] = 1
704 isdescendent = True
704 isdescendent = True
705 if isdescendent and ((ancestors is None) or (n in ancestors)):
705 if isdescendent and ((ancestors is None) or (n in ancestors)):
706 # Only include nodes that are both descendents and ancestors.
706 # Only include nodes that are both descendents and ancestors.
707 orderedout.append(n)
707 orderedout.append(n)
708 if (ancestors is not None) and (n in heads):
708 if (ancestors is not None) and (n in heads):
709 # We're trying to figure out which heads are reachable
709 # We're trying to figure out which heads are reachable
710 # from roots.
710 # from roots.
711 # Mark this head as having been reached
711 # Mark this head as having been reached
712 heads[n] = 1
712 heads[n] = 1
713 elif ancestors is None:
713 elif ancestors is None:
714 # Otherwise, we're trying to discover the heads.
714 # Otherwise, we're trying to discover the heads.
715 # Assume this is a head because if it isn't, the next step
715 # Assume this is a head because if it isn't, the next step
716 # will eventually remove it.
716 # will eventually remove it.
717 heads[n] = 1
717 heads[n] = 1
718 # But, obviously its parents aren't.
718 # But, obviously its parents aren't.
719 for p in self.parents(n):
719 for p in self.parents(n):
720 heads.pop(p, None)
720 heads.pop(p, None)
721 heads = [n for n in heads.iterkeys() if heads[n] != 0]
721 heads = [n for n in heads.iterkeys() if heads[n] != 0]
722 roots = roots.keys()
722 roots = roots.keys()
723 assert orderedout
723 assert orderedout
724 assert roots
724 assert roots
725 assert heads
725 assert heads
726 return (orderedout, roots, heads)
726 return (orderedout, roots, heads)
727
727
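# A minimal, self-contained sketch of the reachability pass above, using a
# toy DAG where parents[r] lists the parent revision numbers of rev r.  The
# names here are hypothetical; only the technique mirrors the method: seed
# a descendant set with the roots, then make one forward scan in revision
# (i.e. topological) order, since parents always precede their children.
def toy_descendants(parents, roots):
    descendants = set(roots)
    out = []
    for r in range(len(parents)):
        if r in descendants or any(p in descendants for p in parents[r]):
            descendants.add(r)
            out.append(r)
    return out

# toy_descendants([[], [0], [0], [1, 2]], [1]) == [1, 3]
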
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            count = self.count()
            if not count:
                return [nullid]
            ishead = [1] * (count + 1)
            index = self.index
            for r in xrange(count):
                e = index[r]
                ishead[e[5]] = ishead[e[6]] = 0
            return [self.node(r) for r in xrange(count) if ishead[r]]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = dict.fromkeys([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev: 1}
        heads = {startrev: 1}

        parentrevs = self.parentrevs
        for r in xrange(startrev + 1, self.count()):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable[r] = 1
                        heads[r] = 1
                    if p in heads and p not in stoprevs:
                        del heads[p]

        return [self.node(r) for r in heads]

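# Sketch of the fast path above: each index entry records its two parent
# revisions (columns 5 and 6 of the entry tuple), so a head is simply a
# revision that never appears as anyone's parent.  'parentrevs' below is a
# hypothetical toy table standing in for the real index, with -1 as nullrev:
def toy_heads(parentrevs):
    count = len(parentrevs)
    ishead = [True] * count
    for r in range(count):
        for p in parentrevs[r]:
            if p >= 0:
                ishead[p] = False
    return [r for r in range(count) if ishead[r]]

# Two branches off rev 0: toy_heads([(-1, -1), (0, -1), (0, -1)]) == [1, 2]
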
    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, self.count()):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

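# There is no child index to consult, so children() scans every later
# revision and checks its parents.  The same inversion over a toy parent
# table (hypothetical helper, not part of the revlog API):
def toy_children(parentrevs, rev):
    return [r for r in range(rev + 1, len(parentrevs))
            if rev in parentrevs[r]]
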
    def _match(self, id):
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                r = self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = self.count() + rev
            if rev < 0 or rev >= self.count():
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                r = self.rev(node)
                return node
            except TypeError:
                pass

    def _partialmatch(self, id):
        if len(id) < 40:
            try:
                # hex(node)[:...]
                bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
                node = None
                for n in self.nodemap:
                    if n.startswith(bin_id) and hex(n).startswith(id):
                        if node is not None:
                            raise LookupError(_("Ambiguous identifier"))
                        node = n
                if node is not None:
                    return node
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(_("No match found"))

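# The prefix path of lookup() is a linear scan over nodemap that must
# reject ambiguous prefixes.  A standalone sketch over plain hex strings
# (toy function; the real code compares binary prefixes first for speed):
def toy_partialmatch(hexnodes, prefix):
    match = None
    for h in hexnodes:
        if h.startswith(prefix):
            if match is not None:
                raise LookupError("Ambiguous identifier")
            match = h
    return match

# toy_partialmatch(["abc123", "abd456"], "abc") == "abc123"
# toy_partialmatch(["abc123", "abd456"], "ab") raises LookupError
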
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

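# cmp() can report "changed or not" without reconstructing the stored
# text: if hashing the candidate text with the revision's parents
# reproduces the node id, the contents must match.  A sketch of the hash,
# assuming the usual revlog construction (SHA-1 over the two parent ids in
# sorted order, then the text) -- treat this as illustrative only:
import hashlib

def toy_hash(text, p1, p2):
    l = [p1, p2]
    l.sort()
    s = hashlib.sha1(l[0])
    s.update(l[1])
    s.update(text)
    return s.digest()
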
    def chunk(self, rev, df=None):
        def loadcache(df):
            if not df:
                if self._inline:
                    df = self.opener(self.indexfile)
                else:
                    df = self.opener(self.datafile)
            df.seek(start)
            self._chunkcache = (start, df.read(cache_length))

        start, length = self.start(rev), self.length(rev)
        if self._inline:
            start += (rev + 1) * self._io.size
        end = start + length

        offset = 0
        if not self._chunkcache:
            cache_length = max(65536, length)
            loadcache(df)
        else:
            cache_start = self._chunkcache[0]
            cache_length = len(self._chunkcache[1])
            cache_end = cache_start + cache_length
            if start >= cache_start and end <= cache_end:
                # it is cached
                offset = start - cache_start
            else:
                cache_length = max(65536, length)
                loadcache(df)

        # avoid copying large chunks
        c = self._chunkcache[1]
        if cache_length != length:
            c = c[offset:offset + length]

        return decompress(c)

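# The caching above keeps one contiguous window of the data file in
# memory so consecutive chunk reads become slicing rather than I/O.  The
# same windowing over an in-memory buffer (toy class with illustrative
# names; 'buf' stands in for the data file):
class ToyWindowCache(object):
    def __init__(self, buf, window=65536):
        self.buf = buf
        self.window = window
        self.cache = None  # (start, data)

    def read(self, start, length):
        if self.cache:
            cstart, cdata = self.cache
            if start >= cstart and start + length <= cstart + len(cdata):
                offset = start - cstart
                return cdata[offset:offset + length]
        # miss: reload a window at least 'window' bytes long
        data = self.buf[start:start + max(self.window, length)]
        self.cache = (start, data)
        return data[:length]
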
    def delta(self, node):
        """return or calculate a delta between a node and its predecessor"""
        r = self.rev(node)
        return self.revdiff(r - 1, r)

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
            return self.chunk(rev2)

        return mdiff.textdiff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid:
            return ""
        if self._cache and self._cache[0] == node:
            return self._cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        # check rev flags
        if self.index[rev][0] & 0xFFFF:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.index[rev][0] & 0xFFFF))

        if self._inline:
            # we probably have the whole chunk cached
            df = None
        else:
            df = self.opener(self.datafile)

        # do we have useful data cached?
        if self._cache and self._cache[1] >= base and self._cache[1] < rev:
            base = self._cache[1]
            text = self._cache[2]
            self._loadindex(base, rev + 1)
        else:
            self._loadindex(base, rev + 1)
            text = self.chunk(base, df=df)

        bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
        text = mdiff.patches(text, bins)
        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self._cache = (node, rev, text)
        return text

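# revision() rebuilds a revision from its delta chain: start from the
# full text stored at base(rev), then apply every delta up to rev.  A
# runnable miniature with (start, end, replacement) tuples standing in
# for the real binary delta format (mdiff.patches applies them all in a
# single pass for speed):
def toy_apply(text, delta):
    start, end, data = delta
    return text[:start] + data + text[end:]

def toy_reconstruct(fulltext, deltas):
    text = fulltext
    for d in deltas:
        text = toy_apply(text, d)
    return text

# toy_reconstruct("hello world", [(0, 5, "goodbye")]) == "goodbye world"
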
    def checkinlinesize(self, tr, fp=None):
        if not self._inline:
            return
        if not fp:
            fp = self.opener(self.indexfile, 'r')
            fp.seek(0, 2)
        size = fp.tell()
        if size < 131072:
            return
        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)
        df = self.opener(self.datafile, 'w')
        calc = self._io.size
        for r in xrange(self.count()):
            start = self.start(r) + (r + 1) * calc
            length = self.length(r)
            fp.seek(start)
            d = fp.read(length)
            df.write(d)
        fp.close()
        df.close()
        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in xrange(self.count()):
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * calc)
        self._chunkcache = None

    def addrevision(self, text, transaction, link, p1, p2, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a+")
        return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)

    def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
        node = hash(text, p1, p2)
        if node in self.nodemap:
            return node

        curr = self.count()
        prev = curr - 1
        base = self.base(prev)
        offset = self.end(prev)

        if curr:
            if not d:
                ptext = self.revision(self.node(prev))
                d = mdiff.textdiff(ptext, text)
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = l + offset - self.start(base)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not curr or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = curr

        e = (offset_type(offset, 0), l, len(text),
             base, link, self.rev(p1), self.rev(p2), node)
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        self._cache = (node, curr, text)
        return node

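# The snapshot heuristic above in isolation: store a delta unless the
# bytes needed to reconstruct the revision (the distance from the chain's
# base) exceed twice the new text's size, in which case store a fresh
# full version so chains cannot grow unboundedly (names illustrative):
def toy_should_store_fulltext(chain_distance, textlen):
    return chain_distance > 2 * textlen

# A 1 KB file whose chain already spans 3 KB gets re-snapshotted:
# toy_should_store_fulltext(3072, 1024) == True
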
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        def parents(rev):
            return [p for p in self.parentrevs(rev) if p != nullrev]

        c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
        if c is None:
            return nullid

        return self.node(c)

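# ancestor.ancestor() works on revision numbers.  A simple stand-in
# intersects the two ancestor sets and picks the candidate with the
# highest revision number; that is always a maximal common ancestor in a
# revlog, because descendants have strictly larger revs (when several
# maximal ancestors exist, the real code may choose a different one):
def toy_ancestor(parents, a, b):
    def ancset(r):
        seen = set()
        stack = [r]
        while stack:
            x = stack.pop()
            if x != -1 and x not in seen:
                seen.add(x)
                stack.extend(parents[x])
        return seen
    common = ancset(a) & ancset(b)
    return max(common) if common else -1

# toy_ancestor([[-1], [0], [0], [1, 2]], 1, 2) == 0
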
    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes.  The first delta is
        parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
        have this parent as it has all history before these
        changesets.  The parent used is parent[0].
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            if a == -1:
                d = self.revision(nb)
                meta += mdiff.trivialdiffheader(len(d))
            else:
                d = self.revdiff(a, b)
            yield changegroup.chunkheader(len(meta) + len(d))
            yield meta
            yield d

        yield changegroup.closechunk()

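# This hunk is the point of the patch: the old code built one big string
# per chunk ("%s%s" % (meta, d)), copying a possibly multi-megabyte delta
# just to prepend 80 bytes of metadata.  Yielding the pieces separately
# lets the consumer write them out without ever concatenating.  A sketch
# of both shapes, with a toy length prefix rather than the real wire
# framing:
import struct

def framed_copying(meta, data):
    payload = meta + data          # copies len(meta) + len(data) bytes
    yield struct.pack(">l", len(payload))
    yield payload

def framed_streaming(meta, data):
    yield struct.pack(">l", len(meta) + len(data))
    yield meta                     # no large copy is ever made
    yield data
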
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        # track the base of the current delta log
        r = self.count()
        t = r - 1
        node = None

        base = prev = nullrev
        start = end = textlen = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #    raise RevlogError(_("already have %s") % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            for p in (p1, p2):
                if p not in self.nodemap:
                    raise LookupError(_("unknown parent %s") % short(p))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if chain not in self.nodemap:
                    raise LookupError(_("unknown base %s") % short(chain[:4]))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                tempd = compress(delta)
                cdelta = tempd[0] + tempd[1]
                textlen = mdiff.patchedsize(textlen, delta)

            if chain != prev or (end - start + len(cdelta)) > textlen * 2:
                # flush our writes here so we can read it in revision
                if dfh:
                    dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = mdiff.patches(text, [delta])
                chk = self._addrevision(text, transaction, link, p1, p2, None,
                                        ifh, dfh)
                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                textlen = len(text)
            else:
                e = (offset_type(end, 0), len(cdelta), textlen, base,
                     link, self.rev(p1), self.rev(p2), node)
                self.index.insert(-1, e)
                self.nodemap[node] = r
                entry = self._io.packentry(e, self.node, self.version, r)
                if self._inline:
                    ifh.write(entry)
                    ifh.write(cdelta)
                    self.checkinlinesize(transaction, ifh)
                    if not self._inline:
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                else:
                    dfh.write(cdelta)
                    ifh.write(entry)

            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)

        return node

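# Each incoming chunk starts with four 20-byte binary ids -- the node,
# its two parents, and the linked changeset -- followed by the delta.
# Decoding that 80-byte header on a synthetic chunk:
import struct

chunk = b"N" * 20 + b"P" * 20 + b"Q" * 20 + b"C" * 20 + b"<delta bytes>"
node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
delta = chunk[80:]
assert node == b"N" * 20 and delta == b"<delta bytes>"
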
    def strip(self, rev, minlink):
        if self.count() == 0 or rev >= self.count():
            return

        if isinstance(self.index, lazyindex):
            self._loadindexmap()

        # When stripping away a revision, we need to make sure it
        # does not actually belong to an older changeset.
        # The minlink parameter defines the oldest revision
        # we're allowed to strip away.
        while minlink > self.index[rev][4]:
            rev += 1
            if rev >= self.count():
                return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            df = self.opener(self.datafile, "a")
            df.truncate(end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        indexf = self.opener(self.indexfile, "a")
        indexf.truncate(end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkcache = None
        for x in xrange(rev, self.count()):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

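# strip() truncates at byte offsets derived from the index geometry: with
# separate files, index records are fixed-size and data is cut where the
# first stripped revision starts; inline revlogs interleave one record
# before each revision's data in a single file.  The arithmetic as a
# standalone helper (entry_size mirrors self._io.size; names are
# illustrative):
def toy_truncate_offsets(rev, data_start, entry_size, inline):
    if inline:
        # one file: keep data_start data bytes plus one record per
        # surviving revision
        return (None, data_start + rev * entry_size)
    # separate files: cut data at data_start, index after rev records
    return (data_start, rev * entry_size)
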
    def checksize(self):
        expected = 0
        if self.count():
            expected = max(0, self.end(self.count() - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = self._io.size
            i = max(0, actual / s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in xrange(self.count()):
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - self.count() * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)