Sync with -stable
Matt Mackall
r3928:4df475e2 (merge, branch default)
--- a/.hgsigs
+++ b/.hgsigs
@@ -1,3 +1,4 @@
 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0 iD8DBQBEYmO2ywK+sNU5EO8RAnaYAKCO7x15xUn5mnhqWNXqk/ehlhRt2QCfRDfY0LrUq2q4oK/KypuJYPHgq1A=
 2be3001847cb18a23c403439d9e7d0ace30804e9 0 iD8DBQBExUbjywK+sNU5EO8RAhzxAKCtyHAQUzcTSZTqlfJ0by6vhREwWQCghaQFHfkfN0l9/40EowNhuMOKnJk=
 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0 iD8DBQBFfL2QywK+sNU5EO8RAjYFAKCoGlaWRTeMsjdmxAjUYx6diZxOBwCfY6IpBYsKvPTwB3oktnPt5Rmrlys=
+27230c29bfec36d5540fbe1c976810aefecfd1d2 0 iD8DBQBFheweywK+sNU5EO8RAt7VAKCrqJQWT2/uo2RWf0ZI4bLp6v82jACgjrMdsaTbxRsypcmEsdPhlG6/8F4=
--- a/.hgtags
+++ b/.hgtags
@@ -1,15 +1,16 @@
 d40cc5aacc31ed673d9b5b24f98bee78c283062c 0.4f
 1c590d34bf61e2ea12c71738e5a746cd74586157 0.4e
 7eca4cfa8aad5fce9a04f7d8acadcd0452e2f34e 0.4d
 b4d0c3786ad3e47beacf8412157326a32b6d25a4 0.4c
 f40273b0ad7b3a6d3012fd37736d0611f41ecf54 0.5
 0a28dfe59f8fab54a5118c5be4f40da34a53cdb7 0.5b
 12e0fdbc57a0be78f0e817fd1d170a3615cd35da 0.6
 4ccf3de52989b14c3d84e1097f59e39a992e00bd 0.6b
 eac9c8efcd9bd8244e72fb6821f769f450457a32 0.6c
 979c049974485125e1f9357f6bbe9c1b548a64c3 0.7
 3a56574f329a368d645853e0f9e09472aee62349 0.8
 6a03cff2b0f5d30281e6addefe96b993582f2eac 0.8.1
 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0.9
 2be3001847cb18a23c403439d9e7d0ace30804e9 0.9.1
 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0.9.2
+27230c29bfec36d5540fbe1c976810aefecfd1d2 0.9.3
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,1863 +1,1863 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import *
from i18n import _
import repo, appendfile, changegroup
import changelog, dirstate, filelog, manifest, context
import re, lock, transaction, tempfile, stat, mdiff, errno, ui
import os, revlog, time, util

class localrepository(repo.repository):
    capabilities = ('lookup', 'changegroupsubset')
    supported = ('revlogv1', 'store')

    def __del__(self):
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.path = os.path.join(path, ".hg")
        self.root = os.path.realpath(path)
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(os.path.join(self.path, "store"))
                requirements = ("revlogv1", "store")
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just use the version from the changelog.
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)

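    # Illustrative sketch (not part of the original file): a repository
    # created by the constructor above carries a .hg/requires file with
    # one requirement per line; opening a repo whose requirements are not
    # all in localrepository.supported raises RepoError. For this version
    # the file would read:
    #
    #   revlogv1
    #   store
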
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r

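    # Illustrative sketch (not part of the original file): a Python hook
    # is configured in hgrc as, e.g.,
    #
    #   [hooks]
    #   pretxncommit.check = python:myhooks.check
    #
    # and callhook() above invokes it as check(ui=..., repo=...,
    # hooktype='pretxncommit', **args); a truthy return value makes the
    # hook (and the guarded operation) fail. Module and function names
    # here are hypothetical:
    #
    #   def check(ui, repo, hooktype, node=None, **kwargs):
    #       if node is None:
    #           return True       # fail: nothing to check
    #       ui.note("checking %s\n" % node)
    #       return False          # pass
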
    tag_disallowed = ':\r\n'

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        self.wfile('.hgtags', 'ab').write(line)
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)

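    # Illustrative sketch (not part of the original file): both tag stores
    # use one "<hex node> <name>" entry per line. A local tag lands in
    # .hg/localtags and is never committed; a regular tag is appended to
    # .hgtags in the working directory and committed, e.g.
    #
    #   35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0.9
    #
    # (entry shown from the .hgtags hunk above).
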
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}

            def parsetag(line, context):
                if not line:
                    return
                s = line.split(" ", 1)
                if len(s) != 2:
                    self.ui.warn(_("%s: cannot parse entry\n") % context)
                    return
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    self.ui.warn(_("%s: node '%s' is not well formed\n") %
                                 (context, node))
                    return
                if bin_n not in self.changelog.nodemap:
                    self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
                                 (context, key))
                    return
                self.tagscache[key] = bin_n

            # read the tags file from each head, ending with the tip,
            # and add each tag found to the map, with "newer" ones
            # taking precedence
            f = None
            for rev, node, fnode in self._hgtagsnodes():
                f = (f and f.filectx(fnode) or
                     self.filectx('.hgtags', fileid=fnode))
                count = 0
                for l in f.data().splitlines():
                    count += 1
                    parsetag(l, _("%s, line %d") % (str(f), count))

            try:
                f = self.opener("localtags")
                count = 0
                for l in f:
                    # localtags are stored in the local character set
                    # while the internal tag table is stored in UTF-8
                    l = util.fromlocal(l)
                    count += 1
                    parsetag(l, _("localtags, line %d") % count)
            except IOError:
                pass

            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

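    # Illustrative sketch (not part of the original file): .hgtags is read
    # from every head, ending with the tip, so a tag redefined on a newer
    # head wins; localtags then override committed tags, and 'tip' is
    # always set last. Roughly:
    #
    #   t = repo.tags()          # {'tip': <node>, '0.9': <node>, ...}
    #   repo.changelog.rev(t['0.9'])
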
    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except repo.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self):
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.rstrip().split(" ", 1)
                partial[label] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branches.cache", "w")
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
        except IOError:
            pass

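    # Illustrative sketch (not part of the original file): the cache
    # written above is plain text, validated on read by the sanity check
    # in _readbranchcache(); the first line records the cached tip, the
    # rest one head per branch:
    #
    #   <tip hex node> <tip rev>
    #   <head hex node> <branch label>
    #   ...
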
    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            if b:
                partial[b] = c.node()

    def lookup(self, key):
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)

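    # Illustrative sketch (not part of the original file): lookup()
    # resolves a user-supplied revision string by trying, in order:
    # '.' (first dirstate parent), 'null', an exact rev number or full
    # hex node, a tag name, a branch name, and finally an unambiguous
    # node-hex prefix:
    #
    #   repo.lookup('.')         # working directory parent
    #   repo.lookup('0.9.3')     # tag, via self.tags()
    #   repo.lookup('4df475e2')  # unique hash prefix, via _partialmatch
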
    def dev(self):
        return os.lstat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def sjoin(self, f):
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f, self.revlogversion)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def wread(self, filename):
        if self.encodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

    def wwrite(self, filename, data, fd=None):
        if self.decodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)

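    # Illustrative sketch (not part of the original file): wread() and
    # wwrite() apply the [encode]/[decode] filters from hgrc, piping file
    # data through the first shell command whose pattern matches. The
    # pattern and commands below are hypothetical:
    #
    #   [encode]
    #   **.txt = dos2unix
    #   [decode]
    #   **.txt = unix2dos
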
    def transaction(self):
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr

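    # Illustrative sketch (not part of the original file): while a
    # transaction runs, store writes are logged to the journal file (and
    # the pre-transaction dirstate to journal.dirstate). On a successful
    # close, aftertrans() performs the renames listed above, journal ->
    # undo and journal.dirstate -> undo.dirstate, which rollback() below
    # replays; a leftover journal file means an interrupted transaction
    # and is replayed by recover().
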
    def recover(self):
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False

    def rollback(self, wlock=None):
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))

    def wreload(self):
        self.dirstate.read()

    def reload(self):
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None

    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=1):
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)

    def wlock(self, wait=1):
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)

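    # Illustrative sketch (not part of the original file): there are two
    # locks with a fixed acquisition order. wlock() guards the working
    # directory and dirstate, lock() guards the store; callers that need
    # both (rollback() above, commit() below) take wlock first, then
    # lock, to avoid deadlocks. do_lock() first tries a non-blocking
    # acquire and only then, if wait is set, blocks with the ui.timeout
    # value (default 600 seconds).
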
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)

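    # Illustrative sketch (not part of the original file): for a simple
    # "hg copy a b; hg commit" (no merge), dirstate.copied('b') returns
    # 'a', so the new filelog revision of b carries
    #
    #   meta = {'copy': 'a', 'copyrev': hex(manifest1['a'])}
    #
    # with both filelog parents nulled out; the merge branches above only
    # differ in which manifest supplies the copy source's revision.
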
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n

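    # Illustrative sketch (not part of the original file): the common
    # call passes no explicit parents, so use_dirstate is True and the
    # changeset is built from the dirstate against its first parent:
    #
    #   node = repo.commit(files=['a', 'b'], text='fix a and b',
    #                      user='someone <someone@example.com>')
    #   # returns the new changeset node, or None if nothing changed
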
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

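    # Illustrative sketch (not part of the original file):
    #
    #   for src, fn in repo.walk(files=['src']):
    #       if src == 'b':
    #           continue      # badmatch hit, see docstring above
    #       process(fn)       # 'f' or 'm' entries; process() is a
    #                         # hypothetical caller function
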
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, mf):
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

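    # Illustrative sketch (not part of the original file): callers unpack
    # the fixed 7-tuple; ignored and clean stay empty unless requested:
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(list_clean=True)
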
    def add(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")

    def forget(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

    def undelete(self, list, wlock=None):
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), m.execf(f))
                self.dirstate.update([f], "n")

    def copy(self, source, dest, wlock=None):
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
982 wlock = self.wlock()
983 if self.dirstate.state(dest) == '?':
983 if self.dirstate.state(dest) == '?':
984 self.dirstate.update([dest], "a")
984 self.dirstate.update([dest], "a")
985 self.dirstate.copy(source, dest)
985 self.dirstate.copy(source, dest)
986
986
987 def heads(self, start=None):
987 def heads(self, start=None):
988 heads = self.changelog.heads(start)
988 heads = self.changelog.heads(start)
989 # sort the output in rev descending order
989 # sort the output in rev descending order
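        # illustrative example: with heads at revs 5, 12 and 17 the
        # decorated list sorts to [(-17, n), (-12, n), (-5, n)], so the
        # undecorated result comes back in order 17, 12, 5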
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

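    # branches() reports, for each starting node, the linear segment of
    # history it sits on: it follows first parents until it hits a merge
    # or a root and returns the tuple (segment head, segment root, root's
    # first parent, root's second parent) that the discovery code in
    # findincoming() consumes.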
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

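    # between() serves the discovery protocol: for each (top, bottom) pair
    # it walks first parents from top towards bottom and samples the nodes
    # at exponentially growing distances 1, 2, 4, 8, ... below top, so a
    # peer can binary-search a long unknown range with only a handful of
    # nodes.  E.g. a linear run of ten changesets yields the nodes one,
    # two, four and eight steps below top.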
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        both self and remote but none of whose children exist in both self
        and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
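        # Each entry in search is a segment whose base we already have but
        # whose head we do not.  remote.between() returns samples at
        # exponentially growing distances from the head; the first sample
        # we recognize locally bounds the unknown region, and once the step
        # size f drops to 2 or less the unknown sample p is the direct
        # child of the known node i, so p is the earliest missing changeset
        # on that segment.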
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

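    # pull() glues the pieces together: findincoming() works out the roots
    # of what is missing locally, the remote bundles everything above those
    # roots into a changegroup (or a changegroupsubset when specific heads
    # were requested), and addchangegroup() applies the bundle under the
    # repository lock.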
    def pull(self, remote, heads=None, force=False, lock=None):
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            if mylock:
                lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

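            # Worked example: the remote has the single head A and we push
            # a history that forks below it into local heads B and C.  Both
            # B and C descend from A, so A is dropped from newheads while B
            # and C remain; len(newheads) == 2 > len(remote_heads) == 1 and
            # the push is refused unless forced.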
            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
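            # sending the literal token 'force' in place of the expected
            # remote heads asks the receiving end to skip its race check
            # rather than abort on a head mismatch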
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
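                    # Each manifest line has the fixed form
                    # "<filename>\0<40 hex digits>", possibly followed by a
                    # flag character, which is why only the first 40
                    # characters of the node field are decoded below.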
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
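        # The stream produced below follows the changegroup wire layout:
        # changelog chunks first, then manifest chunks, then one block per
        # changed file (a chunk naming the file followed by its filenode
        # chunks), and finally an empty chunk closing the group.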
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
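            # cor/cnr bracket the group: the highest revision before and
            # after adding it, so the changeset count and the per-changeset
            # hook range below fall out of the difference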
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

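    # stream_in() consumes the raw streaming-clone protocol: one numeric
    # status line (0 ok, 1 operation forbidden, 2 remote lock failed), one
    # "<total files> <total bytes>" summary line, then for every file a
    # "<name>\0<size>" header followed by exactly <size> bytes of revlog
    # data written straight into the store.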
1787 def stream_in(self, remote):
1787 def stream_in(self, remote):
1788 fp = remote.stream_out()
1788 fp = remote.stream_out()
1789 l = fp.readline()
1789 l = fp.readline()
1790 try:
1790 try:
1791 resp = int(l)
1791 resp = int(l)
1792 except ValueError:
1792 except ValueError:
1793 raise util.UnexpectedOutput(
1793 raise util.UnexpectedOutput(
1794 _('Unexpected response from remote server:'), l)
1794 _('Unexpected response from remote server:'), l)
1795 if resp == 1:
1795 if resp == 1:
1796 raise util.Abort(_('operation forbidden by server'))
1796 raise util.Abort(_('operation forbidden by server'))
1797 elif resp == 2:
1797 elif resp == 2:
1798 raise util.Abort(_('locking the remote repository failed'))
1798 raise util.Abort(_('locking the remote repository failed'))
1799 elif resp != 0:
1799 elif resp != 0:
1800 raise util.Abort(_('the server sent an unknown error code'))
1800 raise util.Abort(_('the server sent an unknown error code'))
1801 self.ui.status(_('streaming all changes\n'))
1801 self.ui.status(_('streaming all changes\n'))
1802 l = fp.readline()
1802 l = fp.readline()
1803 try:
1803 try:
1804 total_files, total_bytes = map(int, l.split(' ', 1))
1804 total_files, total_bytes = map(int, l.split(' ', 1))
1805 except (ValueError, TypeError):
1806 raise util.UnexpectedOutput(
1807 _('Unexpected response from remote server:'), l)
1808 self.ui.status(_('%d files to transfer, %s of data\n') %
1809 (total_files, util.bytecount(total_bytes)))
1810 start = time.time()
1811 for i in xrange(total_files):
1812 # XXX doesn't support '\n' or '\r' in filenames
1813 l = fp.readline()
1814 try:
1815 name, size = l.split('\0', 1)
1816 size = int(size)
1817 except (ValueError, TypeError):
1818 raise util.UnexpectedOutput(
1819 _('Unexpected response from remote server:'), l)
1820 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1821 ofp = self.sopener(name, 'w')
1822 for chunk in util.filechunkiter(fp, limit=size):
1823 ofp.write(chunk)
1824 ofp.close()
1825 elapsed = time.time() - start
1826 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1827 (util.bytecount(total_bytes), elapsed,
1828 util.bytecount(total_bytes / elapsed)))
1829 self.reload()
1830 return len(self.heads()) + 1
1831
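The stream consumed by stream_in() above is line-oriented headers followed by raw revlog bytes. A minimal sketch of a compatible reader, assuming only that fp is a file-like object (read_stream is a hypothetical name):

    def read_stream(fp):
        # status line: 0 = ok, 1 = operation forbidden, 2 = remote lock failed
        resp = int(fp.readline())
        if resp != 0:
            raise IOError('streaming clone refused: %d' % resp)
        # summary line: '<total_files> <total_bytes>'
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        for i in xrange(total_files):
            # per-file header '<name>\0<size>', then exactly size raw bytes
            name, size = fp.readline().split('\0', 1)
            yield name, fp.read(int(size))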
1832 def clone(self, remote, heads=[], stream=False):
1833 '''clone remote repository.
1834
1835 keyword arguments:
1836 heads: list of revs to clone (forces use of pull)
1837 stream: use streaming clone if possible'''
1838
1839 # now, all clients that can request uncompressed clones can
1840 # read repo formats supported by all servers that can serve
1841 # them.
1842
1843 # if revlog format changes, client will have to check version
1844 # and format flags on "stream" capability, and use
1845 # uncompressed only if compatible.
1846
1847 if stream and not heads and remote.capable('stream'):
1848 return self.stream_in(remote)
1849 return self.pull(remote, heads)
1850
1851 # used to avoid circular references so destructors work
1852 def aftertrans(files):
1853 renamefiles = [tuple(t) for t in files]
1854 def a():
1855 for src, dest in renamefiles:
1856 util.rename(src, dest)
1857 return a
1858
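aftertrans() deliberately copies the rename list into plain tuples so the returned closure holds no reference back to the repository. A hedged usage sketch (the journal/undo names mirror how localrepository typically uses it, but are illustrative here):

    # after a transaction closes, turn its journal into the undo files
    onclose = aftertrans([('journal', 'undo'),
                          ('journal.dirstate', 'undo.dirstate')])
    onclose()   # runs util.rename('journal', 'undo'), etc.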
1859 def instance(ui, path, create):
1860 return localrepository(ui, util.drop_scheme('file', path), create)
1861
1862 def islocal(path):
1863 return True
@@ -1,1285 +1,1291 b''
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
7 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import *
13 from node import *
14 from i18n import _
14 from i18n import _
15 import binascii, changegroup, errno, ancestor, mdiff, os
15 import binascii, changegroup, errno, ancestor, mdiff, os
16 import sha, struct, util, zlib
16 import sha, struct, util, zlib
17
17
18 # revlog version strings
18 # revlog version strings
19 REVLOGV0 = 0
19 REVLOGV0 = 0
20 REVLOGNG = 1
20 REVLOGNG = 1
21
21
22 # revlog flags
22 # revlog flags
23 REVLOGNGINLINEDATA = (1 << 16)
23 REVLOGNGINLINEDATA = (1 << 16)
24 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
24 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
25
25
26 REVLOG_DEFAULT_FORMAT = REVLOGNG
26 REVLOG_DEFAULT_FORMAT = REVLOGNG
27 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
27 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
28
28
29 def flagstr(flag):
29 def flagstr(flag):
30 if flag == "inline":
30 if flag == "inline":
31 return REVLOGNGINLINEDATA
31 return REVLOGNGINLINEDATA
32 raise RevlogError(_("unknown revlog flag %s") % flag)
32 raise RevlogError(_("unknown revlog flag %s") % flag)
33
33
34 def hash(text, p1, p2):
34 def hash(text, p1, p2):
35 """generate a hash from the given text and its parent hashes
35 """generate a hash from the given text and its parent hashes
36
36
37 This hash combines both the current file contents and its history
37 This hash combines both the current file contents and its history
38 in a manner that makes it easy to distinguish nodes with the same
38 in a manner that makes it easy to distinguish nodes with the same
39 content in the revision graph.
39 content in the revision graph.
40 """
40 """
41 l = [p1, p2]
41 l = [p1, p2]
42 l.sort()
42 l.sort()
43 s = sha.new(l[0])
43 s = sha.new(l[0])
44 s.update(l[1])
44 s.update(l[1])
45 s.update(text)
45 s.update(text)
46 return s.digest()
46 return s.digest()
47
47
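Because hash() sorts the parents before digesting, a nodeid does not depend on parent order. A quick self-contained check that restates the scheme (Python 2, sha module as imported above):

    import sha

    def nodeid(text, p1, p2):
        l = [p1, p2]
        l.sort()
        s = sha.new(l[0])
        s.update(l[1])
        s.update(text)
        return s.digest()

    assert nodeid('data', 'A' * 20, 'B' * 20) == nodeid('data', 'B' * 20, 'A' * 20)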
48 def compress(text):
49 """ generate a possibly-compressed representation of text """
50 if not text: return ("", text)
51 if len(text) < 44:
52 if text[0] == '\0': return ("", text)
53 return ('u', text)
54 bin = zlib.compress(text)
55 if len(bin) > len(text):
56 if text[0] == '\0': return ("", text)
57 return ('u', text)
58 return ("", bin)
59
60 def decompress(bin):
61 """ decompress the given input """
62 if not bin: return bin
63 t = bin[0]
64 if t == '\0': return bin
65 if t == 'x': return zlib.decompress(bin)
66 if t == 'u': return bin[1:]
67 raise RevlogError(_("unknown compression type %r") % t)
68
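What gets stored is the returned prefix concatenated with the data, and decompress() dispatches on the first byte: '\0' marks literal chunks that happen to start with NUL, 'u' marks short or incompressible text, and 'x' is simply the first byte of a zlib stream. A round-trip sketch, assuming compress() and decompress() above are in scope:

    for text in ('', 'short text', '\0binary-ish', 'a' * 1000):
        prefix, data = compress(text)
        stored = prefix + data            # what lands in the revlog data file
        assert decompress(stored) == text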
69 indexformatv0 = ">4l20s20s20s"
70 v0shaoffset = 56
71 # index ng:
72 # 6 bytes offset
73 # 2 bytes flags
74 # 4 bytes compressed length
75 # 4 bytes uncompressed length
76 # 4 bytes: base rev
77 # 4 bytes link rev
78 # 4 bytes parent 1 rev
79 # 4 bytes parent 2 rev
80 # 32 bytes: nodeid
81 indexformatng = ">Qiiiiii20s12x"
82 ngshaoffset = 32
83 versionformat = ">I"
84
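A hedged sketch of decoding one RevlogNG index entry by hand with the formats above; entry stands for any 64-byte record read from the index (a hypothetical variable):

    import struct

    s = struct.calcsize(indexformatng)      # 64 bytes per entry
    offset_flags, clen, ulen, base, link, p1, p2, node = \
        struct.unpack(indexformatng, entry)
    offset = offset_flags >> 16             # 48-bit position in the data file
    flags = offset_flags & 0xFFFF           # 16-bit per-revision flags
    assert node == entry[ngshaoffset:ngshaoffset + 20]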
85 class lazyparser(object):
86 """
87 this class avoids the need to parse the entirety of large indices
88 """
89
90 # lazyparser is not safe to use on windows if win32 extensions are not
91 # available. it keeps a file handle open, which makes it impossible
92 # to break hardlinks on local cloned repos.
93 safe_to_use = os.name != 'nt' or (not util.is_win_9x() and
94 hasattr(util, 'win32api'))
95
96 def __init__(self, dataf, size, indexformat, shaoffset):
97 self.dataf = dataf
98 self.format = indexformat
99 self.s = struct.calcsize(indexformat)
100 self.indexformat = indexformat
101 self.datasize = size
102 self.l = size/self.s
103 self.index = [None] * self.l
104 self.map = {nullid: nullrev}
105 self.allmap = 0
106 self.all = 0
107 self.mapfind_count = 0
108 self.shaoffset = shaoffset
109
110 def loadmap(self):
111 """
112 during a commit, we need to make sure the rev being added is
113 not a duplicate. This requires loading the entire index,
114 which is fairly slow. loadmap can load up just the node map,
115 which takes much less time.
116 """
117 if self.allmap: return
118 end = self.datasize
119 self.allmap = 1
120 cur = 0
121 count = 0
122 blocksize = self.s * 256
123 self.dataf.seek(0)
124 while cur < end:
125 data = self.dataf.read(blocksize)
126 off = 0
127 for x in xrange(256):
128 n = data[off + self.shaoffset:off + self.shaoffset + 20]
129 self.map[n] = count
130 count += 1
131 if count >= self.l:
132 break
133 off += self.s
134 cur += blocksize
135
136 def loadblock(self, blockstart, blocksize, data=None):
137 if self.all: return
138 if data is None:
139 self.dataf.seek(blockstart)
140 if blockstart + blocksize > self.datasize:
141 # the revlog may have grown since we've started running,
142 # but we don't have space in self.index for more entries.
143 # limit blocksize so that we don't get too much data.
144 blocksize = max(self.datasize - blockstart, 0)
145 data = self.dataf.read(blocksize)
146 lend = len(data) / self.s
147 i = blockstart / self.s
148 off = 0
149 for x in xrange(lend):
150 if self.index[i + x] == None:
151 b = data[off : off + self.s]
152 self.index[i + x] = b
153 n = b[self.shaoffset:self.shaoffset + 20]
154 self.map[n] = i + x
155 off += self.s
156
157 def findnode(self, node):
158 """search backwards through the index file for a specific node"""
159 if self.allmap: return None
160
161 # hg log will cause many many searches for the manifest
162 # nodes. After we get called a few times, just load the whole
163 # thing.
164 if self.mapfind_count > 8:
165 self.loadmap()
166 if node in self.map:
167 return node
168 return None
169 self.mapfind_count += 1
170 last = self.l - 1
171 while self.index[last] != None:
172 if last == 0:
173 self.all = 1
174 self.allmap = 1
175 return None
176 last -= 1
177 end = (last + 1) * self.s
178 blocksize = self.s * 256
179 while end >= 0:
180 start = max(end - blocksize, 0)
181 self.dataf.seek(start)
182 data = self.dataf.read(end - start)
183 findend = end - start
184 while True:
185 # we're searching backwards, so we have to make sure
186 # we don't find a changeset where this node is a parent
187 off = data.rfind(node, 0, findend)
188 findend = off
189 if off >= 0:
190 i = off / self.s
191 off = i * self.s
192 n = data[off + self.shaoffset:off + self.shaoffset + 20]
193 if n == node:
194 self.map[n] = i + start / self.s
195 return node
196 else:
197 break
198 end -= blocksize
199 return None
200
201 def loadindex(self, i=None, end=None):
202 if self.all: return
203 all = False
204 if i == None:
205 blockstart = 0
206 blocksize = (512 / self.s) * self.s
207 end = self.datasize
208 all = True
209 else:
210 if end:
211 blockstart = i * self.s
212 end = end * self.s
213 blocksize = end - blockstart
214 else:
215 blockstart = (i & ~(32)) * self.s
216 blocksize = self.s * 64
217 end = blockstart + blocksize
218 while blockstart < end:
219 self.loadblock(blockstart, blocksize)
220 blockstart += blocksize
221 if all: self.all = True
222
223 class lazyindex(object):
224 """a lazy version of the index array"""
225 def __init__(self, parser):
226 self.p = parser
227 def __len__(self):
228 return len(self.p.index)
229 def load(self, pos):
230 if pos < 0:
231 pos += len(self.p.index)
232 self.p.loadindex(pos)
233 return self.p.index[pos]
234 def __getitem__(self, pos):
235 ret = self.p.index[pos] or self.load(pos)
236 if isinstance(ret, str):
237 ret = struct.unpack(self.p.indexformat, ret)
238 return ret
239 def __setitem__(self, pos, item):
240 self.p.index[pos] = item
241 def __delitem__(self, pos):
242 del self.p.index[pos]
243 def append(self, e):
244 self.p.index.append(e)
245
246 class lazymap(object):
247 """a lazy version of the node map"""
248 def __init__(self, parser):
249 self.p = parser
250 def load(self, key):
251 n = self.p.findnode(key)
252 if n == None:
253 raise KeyError(key)
254 def __contains__(self, key):
255 if key in self.p.map:
256 return True
257 self.p.loadmap()
258 return key in self.p.map
259 def __iter__(self):
260 yield nullid
261 for i in xrange(self.p.l):
262 ret = self.p.index[i]
263 if not ret:
264 self.p.loadindex(i)
265 ret = self.p.index[i]
266 if isinstance(ret, str):
267 ret = struct.unpack(self.p.indexformat, ret)
268 yield ret[-1]
269 def __getitem__(self, key):
270 try:
271 return self.p.map[key]
272 except KeyError:
273 try:
274 self.load(key)
275 return self.p.map[key]
276 except KeyError:
277 raise KeyError("node " + hex(key))
278 def __setitem__(self, key, val):
279 self.p.map[key] = val
280 def __delitem__(self, key):
281 del self.p.map[key]
282
283 class RevlogError(Exception): pass
284
285 class revlog(object):
286 """
287 the underlying revision storage object
288
289 A revlog consists of two parts, an index and the revision data.
290
291 The index is a file with a fixed record size containing
292 information on each revision, including its nodeid (hash), the
293 nodeids of its parents, the position and offset of its data within
294 the data file, and the revision it's based on. Finally, each entry
295 contains a linkrev entry that can serve as a pointer to external
296 data.
297
298 The revision data itself is a linear collection of data chunks.
299 Each chunk represents a revision and is usually represented as a
300 delta against the previous chunk. To bound lookup time, runs of
301 deltas are limited to about 2 times the length of the original
302 version data. This makes retrieval of a version proportional to
303 its size, or O(1) relative to the number of revisions.
304
305 Both pieces of the revlog are written to in an append-only
306 fashion, which means we never need to rewrite a file to insert or
307 remove data, and can use some simple techniques to avoid the need
308 for locking while reading.
309 """
310 def __init__(self, opener, indexfile, datafile,
311 defversion=REVLOG_DEFAULT_VERSION):
312 """
313 create a revlog object
314
315 opener is a function that abstracts the file opening operation
316 and can be used to implement COW semantics or the like.
317 """
318 self.indexfile = indexfile
319 self.datafile = datafile
320 self.opener = opener
321
322 self.indexstat = None
323 self.cache = None
324 self.chunkcache = None
325 self.defversion = defversion
326 self.load()
327
328 def load(self):
329 v = self.defversion
330 try:
331 f = self.opener(self.indexfile)
332 i = f.read(4)
333 f.seek(0)
334 except IOError, inst:
335 if inst.errno != errno.ENOENT:
336 raise
337 i = ""
338 else:
339 try:
340 st = util.fstat(f)
341 except AttributeError, inst:
342 st = None
343 else:
344 oldst = self.indexstat
345 if (oldst and st.st_dev == oldst.st_dev
346 and st.st_ino == oldst.st_ino
347 and st.st_mtime == oldst.st_mtime
348 and st.st_ctime == oldst.st_ctime):
349 return
350 self.indexstat = st
351 if len(i) > 0:
352 v = struct.unpack(versionformat, i)[0]
353 flags = v & ~0xFFFF
354 fmt = v & 0xFFFF
355 if fmt == REVLOGV0:
356 if flags:
357 raise RevlogError(_("index %s unknown flags %#04x for format v0")
358 % (self.indexfile, flags >> 16))
359 elif fmt == REVLOGNG:
360 if flags & ~REVLOGNGINLINEDATA:
361 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
362 % (self.indexfile, flags >> 16))
363 else:
364 raise RevlogError(_("index %s unknown format %d")
365 % (self.indexfile, fmt))
366 self.version = v
367 if v == REVLOGV0:
368 self.indexformat = indexformatv0
369 shaoffset = v0shaoffset
370 else:
371 self.indexformat = indexformatng
372 shaoffset = ngshaoffset
373
374 if i:
375 if (lazyparser.safe_to_use and not self.inlinedata() and
376 st and st.st_size > 10000):
377 # big index, let's parse it on demand
378 parser = lazyparser(f, st.st_size, self.indexformat, shaoffset)
379 self.index = lazyindex(parser)
380 self.nodemap = lazymap(parser)
381 else:
382 self.parseindex(f, st)
383 if self.version != REVLOGV0:
384 e = list(self.index[0])
385 type = self.ngtype(e[0])
386 e[0] = self.offset_type(0, type)
387 self.index[0] = e
388 else:
389 self.nodemap = {nullid: nullrev}
390 self.index = []
391
392
393 def parseindex(self, fp, st):
394 s = struct.calcsize(self.indexformat)
395 self.index = []
396 self.nodemap = {nullid: nullrev}
397 inline = self.inlinedata()
398 n = 0
399 leftover = None
400 while True:
401 if st:
402 data = fp.read(65536)
403 else:
404 # hack for httprangereader, it doesn't do partial reads well
405 data = fp.read()
406 if not data:
407 break
408 if n == 0 and self.inlinedata():
409 # cache the first chunk
410 self.chunkcache = (0, data)
411 if leftover:
412 data = leftover + data
413 leftover = None
414 off = 0
415 l = len(data)
416 while off < l:
417 if l - off < s:
418 leftover = data[off:]
419 break
420 cur = data[off:off + s]
421 off += s
422 e = struct.unpack(self.indexformat, cur)
423 self.index.append(e)
424 self.nodemap[e[-1]] = n
425 n += 1
426 if inline:
427 off += e[1]
428 if off > l:
429 # some things don't seek well, just read it
430 fp.read(off - l)
431 if not st:
432 break
433
434
435 def ngoffset(self, q):
436 if q & 0xFFFF:
437 raise RevlogError(_('%s: incompatible revision flag %x') %
438 (self.indexfile, q))
439 return long(q >> 16)
440
441 def ngtype(self, q):
442 return int(q & 0xFFFF)
443
444 def offset_type(self, offset, type):
445 return long(long(offset) << 16 | type)
446
447 def loadindex(self, start, end):
448 """load a block of indexes all at once from the lazy parser"""
449 if isinstance(self.index, lazyindex):
450 self.index.p.loadindex(start, end)
451
452 def loadindexmap(self):
453 """loads both the map and the index from the lazy parser"""
454 if isinstance(self.index, lazyindex):
455 p = self.index.p
456 p.loadindex()
457 self.nodemap = p.map
458
459 def loadmap(self):
460 """loads the map from the lazy parser"""
461 if isinstance(self.nodemap, lazymap):
462 self.nodemap.p.loadmap()
463 self.nodemap = self.nodemap.p.map
464
465 def inlinedata(self): return self.version & REVLOGNGINLINEDATA
466 def tip(self): return self.node(len(self.index) - 1)
467 def count(self): return len(self.index)
468 def node(self, rev):
469 return rev == nullrev and nullid or self.index[rev][-1]
470 def rev(self, node):
471 try:
472 return self.nodemap[node]
473 except KeyError:
474 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
475 def linkrev(self, node):
476 return (node == nullid) and nullrev or self.index[self.rev(node)][-4]
477 def parents(self, node):
478 if node == nullid: return (nullid, nullid)
479 r = self.rev(node)
480 d = self.index[r][-3:-1]
481 if self.version == REVLOGV0:
482 return d
483 return (self.node(d[0]), self.node(d[1]))
484 def parentrevs(self, rev):
485 if rev == nullrev:
486 return (nullrev, nullrev)
487 d = self.index[rev][-3:-1]
488 if self.version == REVLOGV0:
489 return (self.rev(d[0]), self.rev(d[1]))
490 return d
491 def start(self, rev):
492 if rev == nullrev:
493 return 0
494 if self.version != REVLOGV0:
495 return self.ngoffset(self.index[rev][0])
496 return self.index[rev][0]
497
498 def end(self, rev): return self.start(rev) + self.length(rev)
499
500 def size(self, rev):
501 """return the length of the uncompressed text for a given revision"""
502 if rev == nullrev:
503 return 0
504 l = -1
505 if self.version != REVLOGV0:
506 l = self.index[rev][2]
507 if l >= 0:
508 return l
509
510 t = self.revision(self.node(rev))
511 return len(t)
512
513 # alternate implementation. The advantage of this code is that it
514 # will be faster for a single revision. But the results are not
515 # cached, so finding the size of every revision will be slower.
516 """
516 """
517 if self.cache and self.cache[1] == rev:
517 if self.cache and self.cache[1] == rev:
518 return len(self.cache[2])
518 return len(self.cache[2])
519
519
520 base = self.base(rev)
520 base = self.base(rev)
521 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
521 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
522 base = self.cache[1]
522 base = self.cache[1]
523 text = self.cache[2]
523 text = self.cache[2]
524 else:
524 else:
525 text = self.revision(self.node(base))
525 text = self.revision(self.node(base))
526
526
527 l = len(text)
527 l = len(text)
528 for x in xrange(base + 1, rev + 1):
528 for x in xrange(base + 1, rev + 1):
529 l = mdiff.patchedsize(l, self.chunk(x))
529 l = mdiff.patchedsize(l, self.chunk(x))
530 return l
530 return l
531 """
531 """
532
532
533 def length(self, rev):
533 def length(self, rev):
534 if rev == nullrev:
534 if rev == nullrev:
535 return 0
535 return 0
536 else:
536 else:
537 return self.index[rev][1]
537 return self.index[rev][1]
538 def base(self, rev):
538 def base(self, rev):
539 if (rev == nullrev):
539 if (rev == nullrev):
540 return nullrev
540 return nullrev
541 else:
541 else:
542 return self.index[rev][-5]
542 return self.index[rev][-5]
543
543
544 def reachable(self, node, stop=None):
544 def reachable(self, node, stop=None):
545 """return a hash of all nodes ancestral to a given node, including
545 """return a hash of all nodes ancestral to a given node, including
546 the node itself, stopping when stop is matched"""
546 the node itself, stopping when stop is matched"""
547 reachable = {}
547 reachable = {}
548 visit = [node]
548 visit = [node]
549 reachable[node] = 1
549 reachable[node] = 1
550 if stop:
550 if stop:
551 stopn = self.rev(stop)
551 stopn = self.rev(stop)
552 else:
552 else:
553 stopn = 0
553 stopn = 0
554 while visit:
554 while visit:
555 n = visit.pop(0)
555 n = visit.pop(0)
556 if n == stop:
556 if n == stop:
557 continue
557 continue
558 if n == nullid:
558 if n == nullid:
559 continue
559 continue
560 for p in self.parents(n):
560 for p in self.parents(n):
561 if self.rev(p) < stopn:
561 if self.rev(p) < stopn:
562 continue
562 continue
563 if p not in reachable:
563 if p not in reachable:
564 reachable[p] = 1
564 reachable[p] = 1
565 visit.append(p)
565 visit.append(p)
566 return reachable
566 return reachable
567
567
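reachable() returns its result as a dictionary used as a set. A small usage sketch (rl and the node variables are hypothetical):

    ancestry = rl.reachable(tip)            # tip plus all of its ancestors
    assert tip in ancestry
    partial = rl.reachable(tip, stop=base)  # walk is cut short at base's rev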
568 def nodesbetween(self, roots=None, heads=None):
569 """Return a tuple containing three elements. Elements 1 and 2 contain
570 a final list of bases and heads after all the unreachable ones have been
571 pruned. Element 0 contains a topologically sorted list of all
572
573 nodes that satisfy these constraints:
574 1. All nodes must be descended from a node in roots (the nodes on
575 roots are considered descended from themselves).
576 2. All nodes must also be ancestors of a node in heads (the nodes in
577 heads are considered to be their own ancestors).
578
579 If roots is unspecified, nullid is assumed as the only root.
580 If heads is unspecified, it is taken to be the output of the
581 heads method (i.e. a list of all nodes in the repository that
582 have no children)."""
583 nonodes = ([], [], [])
584 if roots is not None:
585 roots = list(roots)
586 if not roots:
587 return nonodes
588 lowestrev = min([self.rev(n) for n in roots])
589 else:
590 roots = [nullid] # Everybody's a descendent of nullid
591 lowestrev = nullrev
592 if (lowestrev == nullrev) and (heads is None):
593 # We want _all_ the nodes!
594 return ([self.node(r) for r in xrange(0, self.count())],
595 [nullid], list(self.heads()))
596 if heads is None:
597 # All nodes are ancestors, so the latest ancestor is the last
598 # node.
599 highestrev = self.count() - 1
600 # Set ancestors to None to signal that every node is an ancestor.
601 ancestors = None
602 # Set heads to an empty dictionary for later discovery of heads
603 heads = {}
604 else:
605 heads = list(heads)
606 if not heads:
607 return nonodes
608 ancestors = {}
609 # Turn heads into a dictionary so we can remove 'fake' heads.
610 # Also, later we will be using it to filter out the heads we can't
611 # find from roots.
612 heads = dict.fromkeys(heads, 0)
613 # Start at the top and keep marking parents until we're done.
614 nodestotag = heads.keys()
615 # Remember where the top was so we can use it as a limit later.
616 highestrev = max([self.rev(n) for n in nodestotag])
617 while nodestotag:
618 # grab a node to tag
619 n = nodestotag.pop()
620 # Never tag nullid
621 if n == nullid:
622 continue
623 # A node's revision number represents its place in a
624 # topologically sorted list of nodes.
625 r = self.rev(n)
626 if r >= lowestrev:
627 if n not in ancestors:
628 # If we are possibly a descendent of one of the roots
629 # and we haven't already been marked as an ancestor
630 ancestors[n] = 1 # Mark as ancestor
631 # Add non-nullid parents to list of nodes to tag.
632 nodestotag.extend([p for p in self.parents(n) if
633 p != nullid])
634 elif n in heads: # We've seen it before, is it a fake head?
635 # So it is, real heads should not be the ancestors of
636 # any other heads.
637 heads.pop(n)
638 if not ancestors:
639 return nonodes
640 # Now that we have our set of ancestors, we want to remove any
641 # roots that are not ancestors.
642
643 # If one of the roots was nullid, everything is included anyway.
644 if lowestrev > nullrev:
645 # But, since we weren't, let's recompute the lowest rev to not
646 # include roots that aren't ancestors.
647
648 # Filter out roots that aren't ancestors of heads
649 roots = [n for n in roots if n in ancestors]
650 # Recompute the lowest revision
651 if roots:
652 lowestrev = min([self.rev(n) for n in roots])
653 else:
654 # No more roots? Return empty list
655 return nonodes
656 else:
657 # We are descending from nullid, and don't need to care about
658 # any other roots.
659 lowestrev = nullrev
660 roots = [nullid]
661 # Transform our roots list into a 'set' (i.e. a dictionary where the
662 # values don't matter).
663 descendents = dict.fromkeys(roots, 1)
664 # Also, keep the original roots so we can filter out roots that aren't
665 # 'real' roots (i.e. are descended from other roots).
666 roots = descendents.copy()
667 # Our topologically sorted list of output nodes.
668 orderedout = []
669 # Don't start at nullid since we don't want nullid in our output list,
670 # and if nullid shows up in descendents, empty parents will look like
671 # they're descendents.
672 for r in xrange(max(lowestrev, 0), highestrev + 1):
673 n = self.node(r)
674 isdescendent = False
675 if lowestrev == nullrev: # Everybody is a descendent of nullid
676 isdescendent = True
677 elif n in descendents:
678 # n is already a descendent
679 isdescendent = True
680 # This check only needs to be done here because all the roots
681 # will start being marked as descendents before the loop.
682 if n in roots:
683 # If n was a root, check if it's a 'real' root.
684 p = tuple(self.parents(n))
685 # If any of its parents are descendents, it's not a root.
686 if (p[0] in descendents) or (p[1] in descendents):
687 roots.pop(n)
688 else:
689 p = tuple(self.parents(n))
690 # A node is a descendent if either of its parents are
691 # descendents. (We seeded the dependents list with the roots
692 # up there, remember?)
693 if (p[0] in descendents) or (p[1] in descendents):
694 descendents[n] = 1
695 isdescendent = True
696 if isdescendent and ((ancestors is None) or (n in ancestors)):
697 # Only include nodes that are both descendents and ancestors.
698 orderedout.append(n)
699 if (ancestors is not None) and (n in heads):
700 # We're trying to figure out which heads are reachable
701 # from roots.
702 # Mark this head as having been reached
703 heads[n] = 1
704 elif ancestors is None:
705 # Otherwise, we're trying to discover the heads.
706 # Assume this is a head because if it isn't, the next step
707 # will eventually remove it.
708 heads[n] = 1
709 # But, obviously its parents aren't.
710 for p in self.parents(n):
711 heads.pop(p, None)
712 heads = [n for n in heads.iterkeys() if heads[n] != 0]
713 roots = roots.keys()
714 assert orderedout
715 assert roots
716 assert heads
717 return (orderedout, roots, heads)
718
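A hedged usage sketch of nodesbetween() on a three-node linear history a -> b -> c (the revlog rl and the node variables are hypothetical):

    allnodes, bases, tops = rl.nodesbetween(roots=[b], heads=[c])
    # allnodes == [b, c]   topologically sorted, both endpoints included
    # bases    == [b]      roots that survived pruning
    # tops     == [c]      heads actually reachable from the roots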
719 def heads(self, start=None, stop=None):
720 """return the list of all nodes that have no children
721
722 if start is specified, only heads that are descendants of
723 start will be returned
724 if stop is specified, it will consider all the revs from stop
725 as if they had no children
726 """
727 if start is None:
728 start = nullid
729 if stop is None:
730 stop = []
731 stoprevs = dict.fromkeys([self.rev(n) for n in stop])
732 startrev = self.rev(start)
733 reachable = {startrev: 1}
734 heads = {startrev: 1}
735
736 parentrevs = self.parentrevs
737 for r in xrange(startrev + 1, self.count()):
738 for p in parentrevs(r):
739 if p in reachable:
740 if r not in stoprevs:
741 reachable[r] = 1
742 heads[r] = 1
743 if p in heads and p not in stoprevs:
744 del heads[p]
745
746 return [self.node(r) for r in heads]
747
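The new stop argument makes the listed revisions behave as if they were childless, hiding everything beyond them. A sketch on a small graph (rl and the nodes are hypothetical; list order is unspecified):

    #   a -- b -- c
    #         \
    #          d
    rl.heads(start=a)             # -> [c, d]
    rl.heads(start=a, stop=[b])   # -> [b]   c and d are cut off behind b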
748 def children(self, node):
749 """find the children of a given node"""
750 c = []
751 p = self.rev(node)
752 for r in range(p + 1, self.count()):
753 for pr in self.parentrevs(r):
754 if pr == p:
755 c.append(self.node(r))
756 return c
757
758 def _match(self, id):
759 if isinstance(id, (long, int)):
760 # rev
761 return self.node(id)
762 if len(id) == 20:
763 # possibly a binary node
764 # odds of a binary node being all hex in ASCII are 1 in 10**25
765 try:
766 node = id
767 r = self.rev(node) # quick search the index
768 return node
769 except RevlogError:
770 pass # may be partial hex id
771 try:
772 # str(rev)
773 rev = int(id)
774 if str(rev) != id: raise ValueError
775 if rev < 0: rev = self.count() + rev
776 if rev < 0 or rev >= self.count(): raise ValueError
777 return self.node(rev)
778 except (ValueError, OverflowError):
779 pass
780 if len(id) == 40:
781 try:
782 # a full hex nodeid?
783 node = bin(id)
784 r = self.rev(node)
785 return node
786 except TypeError:
787 pass
788
789 def _partialmatch(self, id):
790 if len(id) < 40:
791 try:
792 # hex(node)[:...]
793 bin_id = bin(id[:len(id) & ~1]) # grab an even number of digits
794 node = None
795 for n in self.nodemap:
796 if n.startswith(bin_id) and hex(n).startswith(id):
797 if node is not None:
798 raise RevlogError(_("Ambiguous identifier"))
799 node = n
800 if node is not None:
801 return node
802 except TypeError:
803 pass
804
805 def lookup(self, id):
806 """locate a node based on:
807 - revision number or str(revision number)
808 - nodeid or subset of hex nodeid
809 """
810
811 n = self._match(id)
812 if n is not None:
813 return n
814 n = self._partialmatch(id)
815 if n:
816 return n
817
818 raise RevlogError(_("No match found"))
819
814 def cmp(self, node, text):
820 def cmp(self, node, text):
815 """compare text with a given file revision"""
821 """compare text with a given file revision"""
816 p1, p2 = self.parents(node)
822 p1, p2 = self.parents(node)
817 return hash(text, p1, p2) != node
823 return hash(text, p1, p2) != node
818
824
    def makenode(self, node, text):
        """calculate a file nodeid for text, descended or possibly
        unchanged from node"""

        if self.cmp(node, text):
            return hash(text, node, nullid)
        return node

    def diff(self, a, b):
        """return a delta between two revisions"""
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        """apply a list of patches to a string"""
        return mdiff.patches(t, pl)

    def chunk(self, rev, df=None, cachelen=4096):
        start, length = self.start(rev), self.length(rev)
        inline = self.inlinedata()
        if inline:
            start += (rev + 1) * struct.calcsize(self.indexformat)
        end = start + length
        def loadcache(df):
            cache_length = max(cachelen, length) # 4k
            if not df:
                if inline:
                    df = self.opener(self.indexfile)
                else:
                    df = self.opener(self.datafile)
            df.seek(start)
            self.chunkcache = (start, df.read(cache_length))

        if not self.chunkcache:
            loadcache(df)

        cache_start = self.chunkcache[0]
        cache_end = cache_start + len(self.chunkcache[1])
        if start >= cache_start and end <= cache_end:
            # it is cached
            offset = start - cache_start
        else:
            loadcache(df)
            offset = 0

        #def checkchunk():
        #    df = self.opener(self.datafile)
        #    df.seek(start)
        #    return df.read(length)
        #assert s == checkchunk()
        return decompress(self.chunkcache[1][offset:offset + length])

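The caching dance in chunk() is easier to see in isolation. Below is a small stand-alone model of the same idea, with hypothetical names: read at least cachelen bytes once, then serve any request that falls inside the cached window without another seek.

import io

class WindowedReader(object):
    def __init__(self, fp, cachelen=4096):
        self.fp = fp
        self.cachelen = cachelen
        self.cache = None                    # (start, data)

    def read(self, start, length):
        end = start + length
        if self.cache:
            cstart, data = self.cache
            if cstart <= start and end <= cstart + len(data):
                return data[start - cstart:end - cstart]   # cache hit
        self.fp.seek(start)
        self.cache = (start, self.fp.read(max(self.cachelen, length)))
        return self.cache[1][:length]

r = WindowedReader(io.BytesIO(b"abcdefghij" * 1000))
assert r.read(0, 5) == b"abcde"
assert r.read(5, 5) == b"fghij"              # served from the cached window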
    def delta(self, node):
        """return or calculate a delta between a node and its predecessor"""
        r = self.rev(node)
        return self.revdiff(r - 1, r)

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        b1 = self.base(rev1)
        b2 = self.base(rev2)
        if b1 == b2 and rev1 + 1 == rev2:
            return self.chunk(rev2)
        else:
            return self.diff(self.revision(self.node(rev1)),
                             self.revision(self.node(rev2)))

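Whether revdiff() can reuse a stored chunk depends only on index metadata, never on the texts. A tiny sketch of that predicate (base here stands in for the revlog method of the same name):

def stored_delta_is_reusable(base, rev1, rev2):
    # the chunk for rev2 is exactly the delta rev1 -> rev2 when rev2
    # sits immediately after rev1 on the same delta chain
    return base(rev1) == base(rev2) and rev1 + 1 == rev2

assert stored_delta_is_reusable(lambda r: 0, 1, 2)      # adjacent, one chain
assert not stored_delta_is_reusable(lambda r: r, 1, 2)  # rev2 starts a chain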
    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        if self.inlinedata():
            # we probably have the whole chunk cached
            df = None
        else:
            df = self.opener(self.datafile)

        # do we have useful data cached?
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            text = self.cache[2]
            self.loadindex(base, rev + 1)
        else:
            self.loadindex(base, rev + 1)
            text = self.chunk(base, df=df)

        bins = []
        for r in xrange(base + 1, rev + 1):
            bins.append(self.chunk(r, df=df))

        text = self.patches(text, bins)

        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text

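The reconstruction loop in revision() is the heart of the revlog model: start from the full snapshot at base, then apply every delta up to rev. Here is a runnable miniature in which a delta is just a (start, end, replacement) triple instead of mdiff's binary format; all names are illustrative.

def apply_delta(text, delta):
    start, end, data = delta
    return text[:start] + data + text[end:]

def reconstruct(chunks, bases, rev):
    base = bases[rev]
    text = chunks[base]                      # full snapshot
    for r in range(base + 1, rev + 1):       # walk the delta chain
        text = apply_delta(text, chunks[r])
    return text

chunks = [b"abc", (3, 3, b"def"), (0, 1, b"X")]   # rev 0 full, revs 1-2 deltas
bases = [0, 0, 0]
assert reconstruct(chunks, bases, 1) == b"abcdef"
assert reconstruct(chunks, bases, 2) == b"Xbcdef"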
    def checkinlinesize(self, tr, fp=None):
        if not self.inlinedata():
            return
        if not fp:
            fp = self.opener(self.indexfile, 'r')
        fp.seek(0, 2)
        size = fp.tell()
        if size < 131072:
            return
        trinfo = tr.find(self.indexfile)
        if trinfo == None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)
        df = self.opener(self.datafile, 'w')
        calc = struct.calcsize(self.indexformat)
        for r in xrange(self.count()):
            start = self.start(r) + (r + 1) * calc
            length = self.length(r)
            fp.seek(start)
            d = fp.read(length)
            df.write(d)
        fp.close()
        df.close()
        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        if self.count():
            x = self.index[0]
            e = struct.pack(self.indexformat, *x)[4:]
            l = struct.pack(versionformat, self.version)
            fp.write(l)
            fp.write(e)

        for i in xrange(1, self.count()):
            x = self.index[i]
            e = struct.pack(self.indexformat, *x)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * calc)
        self.chunkcache = None

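The offset arithmetic above comes from the inline layout: index entries and data chunks are interleaved in one file, so rev r's chunk sits at its logical data offset plus r + 1 index entries (entries 0 through r all precede it). A sketch, assuming the 64-byte RevlogNG entry layout:

import struct

indexformat = ">Qiiiiii20s12x"       # assumed RevlogNG entry layout
entrysize = struct.calcsize(indexformat)
assert entrysize == 64

def inline_offset(logical_start, rev):
    return logical_start + (rev + 1) * entrysize

# rev 0's data starts right after the first index entry
assert inline_offset(0, 0) == 64
# checkinlinesize() splits the file once it passes 128 KiB (131072 bytes)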
    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        if not self.inlinedata():
            dfh = self.opener(self.datafile, "a")
        else:
            dfh = None
        ifh = self.opener(self.indexfile, "a+")
        return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)

    def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, text)
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = end - start + l

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        if self.version == REVLOGV0:
            e = (offset, l, base, link, p1, p2, node)
        else:
            e = (self.offset_type(offset, 0), l, len(text),
                 base, link, self.rev(p1), self.rev(p2), node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(self.indexformat, *e)

        if not self.inlinedata():
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, n * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
        else:
            ifh.seek(0, 2)
            transaction.add(self.indexfile, ifh.tell(), self.count() - 1)

        if len(self.index) == 1 and self.version != REVLOGV0:
            l = struct.pack(versionformat, self.version)
            ifh.write(l)
            entry = entry[4:]

        ifh.write(entry)

        if self.inlinedata():
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        self.cache = (node, n, text)
        return node

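The `dist > len(text) * 2` test above is the snapshot heuristic that keeps delta chains short: once the bytes needed to reconstruct a revision through its chain exceed twice the uncompressed text, a full version is stored instead. A sketch of just that decision:

def should_store_full(dist, textlen, is_first):
    # dist: bytes from the chain's snapshot through the newest delta
    return is_first or dist > textlen * 2

assert should_store_full(0, 0, True)            # first rev: always a snapshot
assert should_store_full(2049, 1024, False)     # chain too costly: resnapshot
assert not should_store_full(2000, 1024, False) # keep extending the chain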
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        def parents(rev):
            return [p for p in self.parentrevs(rev) if p != nullrev]

        c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
        if c is None:
            return nullid

        return self.node(c)

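A toy version of what ancestor() computes (the real ancestor.ancestor is more efficient): intersect the two ancestor sets and pick the highest revision number, which in a revlog's topological numbering is a maximal common ancestor.

def ancestors(rev, parents):
    seen, stack = set(), [rev]
    while stack:
        r = stack.pop()
        if r not in seen:
            seen.add(r)
            stack.extend(parents(r))
    return seen

def lca(a, b, parents):
    common = ancestors(a, parents) & ancestors(b, parents)
    return max(common) if common else None      # None maps to nullid above

# tiny DAG: 1 and 3 both descend from 0; 2 descends from 1
dag = {0: [], 1: [0], 2: [1], 3: [0]}
assert lca(2, 3, dag.__getitem__) == 0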
    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. The parent used is parent[0].
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            d = self.revdiff(a, b)
            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            yield changegroup.genchunk("%s%s" % (meta, d))

        yield changegroup.closechunk()

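On the wire, each delta produced above travels as a length-prefixed chunk: 80 bytes of metadata (node, p1, p2, and the node used for linkrev lookup) followed by the delta, with a zero-length chunk closing the group. A sketch of that framing, assuming the classic 4-byte big-endian length prefix that counts itself:

import struct

def genchunk(data):
    return struct.pack(">l", len(data) + 4) + data

def closechunk():
    return struct.pack(">l", 0)

node = p1 = p2 = link = b"\x00" * 20
frame = genchunk(node + p1 + p2 + link + b"<delta bytes>")
assert struct.unpack(">l", frame[:4])[0] == len(frame)
assert closechunk() == b"\x00\x00\x00\x00"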
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        Given a set of deltas, add them to the revision log. The
        first delta is against its parent, which should be in our
        log; the rest are against the previous delta.
        """

        # track the base of the current delta log
        r = self.count()
        t = r - 1
        node = None

        base = prev = nullrev
        start = end = textlen = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        ifh.seek(0, 2)
        transaction.add(self.indexfile, ifh.tell(), self.count())
        if self.inlinedata():
            dfh = None
        else:
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #    raise RevlogError(_("already have %s") % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            for p in (p1, p2):
                if not p in self.nodemap:
                    raise RevlogError(_("unknown parent %s") % short(p))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise RevlogError(_("unknown base %s") % short(chain[:4]))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                tempd = compress(delta)
                cdelta = tempd[0] + tempd[1]
                textlen = mdiff.patchedsize(textlen, delta)

            if chain != prev or (end - start + len(cdelta)) > textlen * 2:
                # flush our writes here so we can read it in revision
                if dfh:
                    dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self._addrevision(text, transaction, link, p1, p2, None,
                                        ifh, dfh)
                if not dfh and not self.inlinedata():
                    # addrevision switched from inline to conventional
                    # reopen the index
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                textlen = len(text)
            else:
                if self.version == REVLOGV0:
                    e = (end, len(cdelta), base, link, p1, p2, node)
                else:
                    e = (self.offset_type(end, 0), len(cdelta), textlen, base,
                         link, self.rev(p1), self.rev(p2), node)
                self.index.append(e)
                self.nodemap[node] = r
                if self.inlinedata():
                    ifh.write(struct.pack(self.indexformat, *e))
                    ifh.write(cdelta)
                    self.checkinlinesize(transaction, ifh)
                    if not self.inlinedata():
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                else:
                    dfh.write(cdelta)
                    ifh.write(struct.pack(self.indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)

        return node

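The receiving side peels the same framing apart; this is the struct.unpack call addgroup() starts each iteration with, shown stand-alone:

import struct

chunk = (b"N" * 20) + (b"1" * 20) + (b"2" * 20) + (b"C" * 20) + b"payload"
node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
delta = chunk[80:]
assert node == b"N" * 20 and cs == b"C" * 20 and delta == b"payload"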
    def strip(self, rev, minlink):
        if self.count() == 0 or rev >= self.count():
            return

        if isinstance(self.index, lazyindex):
            self.loadindexmap()

        # When stripping away a revision, we need to make sure it
        # does not actually belong to an older changeset.
        # The minlink parameter defines the oldest revision
        # we're allowed to strip away.
        while minlink > self.index[rev][-4]:
            rev += 1
            if rev >= self.count():
                return

        # first truncate the files on disk
        end = self.start(rev)
        if not self.inlinedata():
            df = self.opener(self.datafile, "a")
            df.truncate(end)
            end = rev * struct.calcsize(self.indexformat)
        else:
            end += rev * struct.calcsize(self.indexformat)

        indexf = self.opener(self.indexfile, "a")
        indexf.truncate(end)

        # then reset internal state in memory to forget those revisions
        self.cache = None
        self.chunkcache = None
        for x in xrange(rev, self.count()):
            del self.nodemap[self.node(x)]

        del self.index[rev:]

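strip() has to compute two different truncation points depending on layout. A sketch of just that arithmetic (hypothetical helper, mirroring the branches above): a split revlog cuts the data file at rev's data start and the index after rev whole entries, while an inline revlog cuts its single file at the data start plus the rev interleaved entries.

def truncate_offsets(rev, data_start, entrysize, inline):
    if inline:
        return (None, data_start + rev * entrysize)   # one file to cut
    return (data_start, rev * entrysize)              # (.d offset, .i offset)

assert truncate_offsets(3, 1000, 64, inline=False) == (1000, 192)
assert truncate_offsets(3, 1000, 64, inline=True) == (None, 1192)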
    def checksize(self):
        expected = 0
        if self.count():
            expected = self.end(self.count() - 1)

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = struct.calcsize(self.indexformat)
            i = actual / s
            di = actual - (i * s)
            if self.inlinedata():
                databytes = 0
                for r in xrange(self.count()):
                    databytes += self.length(r)
                dd = 0
                di = actual - self.count() * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

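checksize() returns a pair of discrepancies, both zero for a healthy revlog: dd is data-file bytes beyond what the index accounts for, and di is trailing index bytes that do not form whole entries. A small interpretation helper (hypothetical, for illustration only):

def interpret_checksize(dd, di):
    if dd:
        return "data file is %d bytes off" % dd
    if di:
        return "index has %d stray bytes" % di
    return "revlog sizes are consistent"

assert interpret_checksize(0, 0) == "revlog sizes are consistent"
assert interpret_checksize(12, 0) == "data file is 12 bytes off"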
@@ -1,57 +1,62 @@
#!/bin/sh

mkdir a
cd a
hg init
echo foo > t1
hg add t1
hg commit -m "1" -d "1000000 0"

cd ..
hg clone a b

cd a
echo foo > t2
hg add t2
hg commit -m "2" -d "1000000 0"

cd ../b
echo foo > t3
hg add t3
hg commit -m "3" -d "1000000 0"

hg push ../a
hg pull ../a
hg push ../a
hg merge
hg commit -m "4" -d "1000000 0"
hg push ../a
cd ..

hg init c
cd c
for i in 0 1 2; do
    echo $i >> foo
    hg ci -Am $i -d "1000000 0"
done
cd ..

hg clone c d
cd d
for i in 0 1; do
    hg co -C $i
    echo d-$i >> foo
    hg ci -m d-$i -d "1000000 0"
done

HGMERGE=true hg merge 3
hg ci -m c-d -d "1000000 0"

hg push ../c; echo $?
hg push -r 2 ../c; echo $?
hg push -r 3 ../c; echo $?
hg push -r 3 -r 4 ../c; echo $?
hg push -f -r 3 -r 4 ../c; echo $?
hg push -r 5 ../c; echo $?

+# issue 450
+hg init ../e
+hg push -r 0 ../e ; echo $?
+hg push -r 1 ../e ; echo $?
+
exit 0
@@ -1,64 +1,78 @@
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
pushing to ../a
searching for changes
abort: push creates new remote branches!
(did you forget to merge? use push -f to force)
pulling from ../a
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files (+1 heads)
(run 'hg heads' to see heads, 'hg merge' to merge)
pushing to ../a
searching for changes
abort: push creates new remote branches!
(did you forget to merge? use push -f to force)
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
pushing to ../a
searching for changes
adding changesets
adding manifests
adding file changes
added 2 changesets with 1 changes to 1 files
adding foo
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
merging foo
0 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
pushing to ../c
searching for changes
abort: push creates new remote branches!
(did you forget to merge? use push -f to force)
0
pushing to ../c
searching for changes
no changes found
0
pushing to ../c
searching for changes
abort: push creates new remote branches!
(did you forget to merge? use push -f to force)
0
pushing to ../c
searching for changes
abort: push creates new remote branches!
(did you forget to merge? use push -f to force)
0
pushing to ../c
searching for changes
adding changesets
adding manifests
adding file changes
added 2 changesets with 2 changes to 1 files (+2 heads)
0
pushing to ../c
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files (-1 heads)
0
+pushing to ../e
+searching for changes
+adding changesets
+adding manifests
+adding file changes
+added 1 changesets with 1 changes to 1 files
+0
+pushing to ../e
+searching for changes
+adding changesets
+adding manifests
+adding file changes
+added 1 changesets with 1 changes to 1 files
+0