##// END OF EJS Templates
Remove manifest.readflags
Matt Mackall -
r2841:e3fb4223 default
parent child Browse files
Show More
@@ -1,1758 +1,1758 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ()
18 capabilities = ()
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("no repo found"))
30 raise repo.RepoError(_("no repo found"))
31 path = p
31 path = p
32 self.path = os.path.join(path, ".hg")
32 self.path = os.path.join(path, ".hg")
33
33
34 if not create and not os.path.isdir(self.path):
34 if not create and not os.path.isdir(self.path):
35 raise repo.RepoError(_("repository %s not found") % path)
35 raise repo.RepoError(_("repository %s not found") % path)
36
36
37 self.root = os.path.abspath(path)
37 self.root = os.path.abspath(path)
38 self.origroot = path
38 self.origroot = path
39 self.ui = ui.ui(parentui=parentui)
39 self.ui = ui.ui(parentui=parentui)
40 self.opener = util.opener(self.path)
40 self.opener = util.opener(self.path)
41 self.wopener = util.opener(self.root)
41 self.wopener = util.opener(self.root)
42
42
43 try:
43 try:
44 self.ui.readconfig(self.join("hgrc"), self.root)
44 self.ui.readconfig(self.join("hgrc"), self.root)
45 except IOError:
45 except IOError:
46 pass
46 pass
47
47
48 v = self.ui.revlogopts
48 v = self.ui.revlogopts
49 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
49 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
50 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
50 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
51 fl = v.get('flags', None)
51 fl = v.get('flags', None)
52 flags = 0
52 flags = 0
53 if fl != None:
53 if fl != None:
54 for x in fl.split():
54 for x in fl.split():
55 flags |= revlog.flagstr(x)
55 flags |= revlog.flagstr(x)
56 elif self.revlogv1:
56 elif self.revlogv1:
57 flags = revlog.REVLOG_DEFAULT_FLAGS
57 flags = revlog.REVLOG_DEFAULT_FLAGS
58
58
59 v = self.revlogversion | flags
59 v = self.revlogversion | flags
60 self.manifest = manifest.manifest(self.opener, v)
60 self.manifest = manifest.manifest(self.opener, v)
61 self.changelog = changelog.changelog(self.opener, v)
61 self.changelog = changelog.changelog(self.opener, v)
62
62
63 # the changelog might not have the inline index flag
63 # the changelog might not have the inline index flag
64 # on. If the format of the changelog is the same as found in
64 # on. If the format of the changelog is the same as found in
65 # .hgrc, apply any flags found in the .hgrc as well.
65 # .hgrc, apply any flags found in the .hgrc as well.
66 # Otherwise, just version from the changelog
66 # Otherwise, just version from the changelog
67 v = self.changelog.version
67 v = self.changelog.version
68 if v == self.revlogversion:
68 if v == self.revlogversion:
69 v |= flags
69 v |= flags
70 self.revlogversion = v
70 self.revlogversion = v
71
71
72 self.tagscache = None
72 self.tagscache = None
73 self.nodetagscache = None
73 self.nodetagscache = None
74 self.encodepats = None
74 self.encodepats = None
75 self.decodepats = None
75 self.decodepats = None
76 self.transhandle = None
76 self.transhandle = None
77
77
78 if create:
78 if create:
79 if not os.path.exists(path):
79 if not os.path.exists(path):
80 os.mkdir(path)
80 os.mkdir(path)
81 os.mkdir(self.path)
81 os.mkdir(self.path)
82 os.mkdir(self.join("data"))
82 os.mkdir(self.join("data"))
83
83
84 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
84 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
85
85
86 def url(self):
86 def url(self):
87 return 'file:' + self.root
87 return 'file:' + self.root
88
88
89 def hook(self, name, throw=False, **args):
89 def hook(self, name, throw=False, **args):
90 def callhook(hname, funcname):
90 def callhook(hname, funcname):
91 '''call python hook. hook is callable object, looked up as
91 '''call python hook. hook is callable object, looked up as
92 name in python module. if callable returns "true", hook
92 name in python module. if callable returns "true", hook
93 fails, else passes. if hook raises exception, treated as
93 fails, else passes. if hook raises exception, treated as
94 hook failure. exception propagates if throw is "true".
94 hook failure. exception propagates if throw is "true".
95
95
96 reason for "true" meaning "hook failed" is so that
96 reason for "true" meaning "hook failed" is so that
97 unmodified commands (e.g. mercurial.commands.update) can
97 unmodified commands (e.g. mercurial.commands.update) can
98 be run as hooks without wrappers to convert return values.'''
98 be run as hooks without wrappers to convert return values.'''
99
99
100 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
100 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
101 d = funcname.rfind('.')
101 d = funcname.rfind('.')
102 if d == -1:
102 if d == -1:
103 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
103 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
104 % (hname, funcname))
104 % (hname, funcname))
105 modname = funcname[:d]
105 modname = funcname[:d]
106 try:
106 try:
107 obj = __import__(modname)
107 obj = __import__(modname)
108 except ImportError:
108 except ImportError:
109 try:
109 try:
110 # extensions are loaded with hgext_ prefix
110 # extensions are loaded with hgext_ prefix
111 obj = __import__("hgext_%s" % modname)
111 obj = __import__("hgext_%s" % modname)
112 except ImportError:
112 except ImportError:
113 raise util.Abort(_('%s hook is invalid '
113 raise util.Abort(_('%s hook is invalid '
114 '(import of "%s" failed)') %
114 '(import of "%s" failed)') %
115 (hname, modname))
115 (hname, modname))
116 try:
116 try:
117 for p in funcname.split('.')[1:]:
117 for p in funcname.split('.')[1:]:
118 obj = getattr(obj, p)
118 obj = getattr(obj, p)
119 except AttributeError, err:
119 except AttributeError, err:
120 raise util.Abort(_('%s hook is invalid '
120 raise util.Abort(_('%s hook is invalid '
121 '("%s" is not defined)') %
121 '("%s" is not defined)') %
122 (hname, funcname))
122 (hname, funcname))
123 if not callable(obj):
123 if not callable(obj):
124 raise util.Abort(_('%s hook is invalid '
124 raise util.Abort(_('%s hook is invalid '
125 '("%s" is not callable)') %
125 '("%s" is not callable)') %
126 (hname, funcname))
126 (hname, funcname))
127 try:
127 try:
128 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
128 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
129 except (KeyboardInterrupt, util.SignalInterrupt):
129 except (KeyboardInterrupt, util.SignalInterrupt):
130 raise
130 raise
131 except Exception, exc:
131 except Exception, exc:
132 if isinstance(exc, util.Abort):
132 if isinstance(exc, util.Abort):
133 self.ui.warn(_('error: %s hook failed: %s\n') %
133 self.ui.warn(_('error: %s hook failed: %s\n') %
134 (hname, exc.args[0] % exc.args[1:]))
134 (hname, exc.args[0] % exc.args[1:]))
135 else:
135 else:
136 self.ui.warn(_('error: %s hook raised an exception: '
136 self.ui.warn(_('error: %s hook raised an exception: '
137 '%s\n') % (hname, exc))
137 '%s\n') % (hname, exc))
138 if throw:
138 if throw:
139 raise
139 raise
140 self.ui.print_exc()
140 self.ui.print_exc()
141 return True
141 return True
142 if r:
142 if r:
143 if throw:
143 if throw:
144 raise util.Abort(_('%s hook failed') % hname)
144 raise util.Abort(_('%s hook failed') % hname)
145 self.ui.warn(_('warning: %s hook failed\n') % hname)
145 self.ui.warn(_('warning: %s hook failed\n') % hname)
146 return r
146 return r
147
147
148 def runhook(name, cmd):
148 def runhook(name, cmd):
149 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
149 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
150 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
150 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
151 r = util.system(cmd, environ=env, cwd=self.root)
151 r = util.system(cmd, environ=env, cwd=self.root)
152 if r:
152 if r:
153 desc, r = util.explain_exit(r)
153 desc, r = util.explain_exit(r)
154 if throw:
154 if throw:
155 raise util.Abort(_('%s hook %s') % (name, desc))
155 raise util.Abort(_('%s hook %s') % (name, desc))
156 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
156 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
157 return r
157 return r
158
158
159 r = False
159 r = False
160 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
160 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
161 if hname.split(".", 1)[0] == name and cmd]
161 if hname.split(".", 1)[0] == name and cmd]
162 hooks.sort()
162 hooks.sort()
163 for hname, cmd in hooks:
163 for hname, cmd in hooks:
164 if cmd.startswith('python:'):
164 if cmd.startswith('python:'):
165 r = callhook(hname, cmd[7:].strip()) or r
165 r = callhook(hname, cmd[7:].strip()) or r
166 else:
166 else:
167 r = runhook(hname, cmd) or r
167 r = runhook(hname, cmd) or r
168 return r
168 return r
169
169
170 tag_disallowed = ':\r\n'
170 tag_disallowed = ':\r\n'
171
171
172 def tag(self, name, node, local=False, message=None, user=None, date=None):
172 def tag(self, name, node, local=False, message=None, user=None, date=None):
173 '''tag a revision with a symbolic name.
173 '''tag a revision with a symbolic name.
174
174
175 if local is True, the tag is stored in a per-repository file.
175 if local is True, the tag is stored in a per-repository file.
176 otherwise, it is stored in the .hgtags file, and a new
176 otherwise, it is stored in the .hgtags file, and a new
177 changeset is committed with the change.
177 changeset is committed with the change.
178
178
179 keyword arguments:
179 keyword arguments:
180
180
181 local: whether to store tag in non-version-controlled file
181 local: whether to store tag in non-version-controlled file
182 (default False)
182 (default False)
183
183
184 message: commit message to use if committing
184 message: commit message to use if committing
185
185
186 user: name of user to use if committing
186 user: name of user to use if committing
187
187
188 date: date tuple to use if committing'''
188 date: date tuple to use if committing'''
189
189
190 for c in self.tag_disallowed:
190 for c in self.tag_disallowed:
191 if c in name:
191 if c in name:
192 raise util.Abort(_('%r cannot be used in a tag name') % c)
192 raise util.Abort(_('%r cannot be used in a tag name') % c)
193
193
194 self.hook('pretag', throw=True, node=node, tag=name, local=local)
194 self.hook('pretag', throw=True, node=node, tag=name, local=local)
195
195
196 if local:
196 if local:
197 self.opener('localtags', 'a').write('%s %s\n' % (node, name))
197 self.opener('localtags', 'a').write('%s %s\n' % (node, name))
198 self.hook('tag', node=node, tag=name, local=local)
198 self.hook('tag', node=node, tag=name, local=local)
199 return
199 return
200
200
201 for x in self.changes():
201 for x in self.changes():
202 if '.hgtags' in x:
202 if '.hgtags' in x:
203 raise util.Abort(_('working copy of .hgtags is changed '
203 raise util.Abort(_('working copy of .hgtags is changed '
204 '(please commit .hgtags manually)'))
204 '(please commit .hgtags manually)'))
205
205
206 self.wfile('.hgtags', 'ab').write('%s %s\n' % (node, name))
206 self.wfile('.hgtags', 'ab').write('%s %s\n' % (node, name))
207 if self.dirstate.state('.hgtags') == '?':
207 if self.dirstate.state('.hgtags') == '?':
208 self.add(['.hgtags'])
208 self.add(['.hgtags'])
209
209
210 if not message:
210 if not message:
211 message = _('Added tag %s for changeset %s') % (name, node)
211 message = _('Added tag %s for changeset %s') % (name, node)
212
212
213 self.commit(['.hgtags'], message, user, date)
213 self.commit(['.hgtags'], message, user, date)
214 self.hook('tag', node=node, tag=name, local=local)
214 self.hook('tag', node=node, tag=name, local=local)
215
215
216 def tags(self):
216 def tags(self):
217 '''return a mapping of tag to node'''
217 '''return a mapping of tag to node'''
218 if not self.tagscache:
218 if not self.tagscache:
219 self.tagscache = {}
219 self.tagscache = {}
220
220
221 def parsetag(line, context):
221 def parsetag(line, context):
222 if not line:
222 if not line:
223 return
223 return
224 s = l.split(" ", 1)
224 s = l.split(" ", 1)
225 if len(s) != 2:
225 if len(s) != 2:
226 self.ui.warn(_("%s: cannot parse entry\n") % context)
226 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 return
227 return
228 node, key = s
228 node, key = s
229 key = key.strip()
229 key = key.strip()
230 try:
230 try:
231 bin_n = bin(node)
231 bin_n = bin(node)
232 except TypeError:
232 except TypeError:
233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 (context, node))
234 (context, node))
235 return
235 return
236 if bin_n not in self.changelog.nodemap:
236 if bin_n not in self.changelog.nodemap:
237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 (context, key))
238 (context, key))
239 return
239 return
240 self.tagscache[key] = bin_n
240 self.tagscache[key] = bin_n
241
241
242 # read the tags file from each head, ending with the tip,
242 # read the tags file from each head, ending with the tip,
243 # and add each tag found to the map, with "newer" ones
243 # and add each tag found to the map, with "newer" ones
244 # taking precedence
244 # taking precedence
245 heads = self.heads()
245 heads = self.heads()
246 heads.reverse()
246 heads.reverse()
247 fl = self.file(".hgtags")
247 fl = self.file(".hgtags")
248 for node in heads:
248 for node in heads:
249 change = self.changelog.read(node)
249 change = self.changelog.read(node)
250 rev = self.changelog.rev(node)
250 rev = self.changelog.rev(node)
251 fn, ff = self.manifest.find(change[0], '.hgtags')
251 fn, ff = self.manifest.find(change[0], '.hgtags')
252 if fn is None: continue
252 if fn is None: continue
253 count = 0
253 count = 0
254 for l in fl.read(fn).splitlines():
254 for l in fl.read(fn).splitlines():
255 count += 1
255 count += 1
256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 (rev, short(node), count))
257 (rev, short(node), count))
258 try:
258 try:
259 f = self.opener("localtags")
259 f = self.opener("localtags")
260 count = 0
260 count = 0
261 for l in f:
261 for l in f:
262 count += 1
262 count += 1
263 parsetag(l, _("localtags, line %d") % count)
263 parsetag(l, _("localtags, line %d") % count)
264 except IOError:
264 except IOError:
265 pass
265 pass
266
266
267 self.tagscache['tip'] = self.changelog.tip()
267 self.tagscache['tip'] = self.changelog.tip()
268
268
269 return self.tagscache
269 return self.tagscache
270
270
271 def tagslist(self):
271 def tagslist(self):
272 '''return a list of tags ordered by revision'''
272 '''return a list of tags ordered by revision'''
273 l = []
273 l = []
274 for t, n in self.tags().items():
274 for t, n in self.tags().items():
275 try:
275 try:
276 r = self.changelog.rev(n)
276 r = self.changelog.rev(n)
277 except:
277 except:
278 r = -2 # sort to the beginning of the list if unknown
278 r = -2 # sort to the beginning of the list if unknown
279 l.append((r, t, n))
279 l.append((r, t, n))
280 l.sort()
280 l.sort()
281 return [(t, n) for r, t, n in l]
281 return [(t, n) for r, t, n in l]
282
282
283 def nodetags(self, node):
283 def nodetags(self, node):
284 '''return the tags associated with a node'''
284 '''return the tags associated with a node'''
285 if not self.nodetagscache:
285 if not self.nodetagscache:
286 self.nodetagscache = {}
286 self.nodetagscache = {}
287 for t, n in self.tags().items():
287 for t, n in self.tags().items():
288 self.nodetagscache.setdefault(n, []).append(t)
288 self.nodetagscache.setdefault(n, []).append(t)
289 return self.nodetagscache.get(node, [])
289 return self.nodetagscache.get(node, [])
290
290
291 def lookup(self, key):
291 def lookup(self, key):
292 try:
292 try:
293 return self.tags()[key]
293 return self.tags()[key]
294 except KeyError:
294 except KeyError:
295 if key == '.':
295 if key == '.':
296 key = self.dirstate.parents()[0]
296 key = self.dirstate.parents()[0]
297 if key == nullid:
297 if key == nullid:
298 raise repo.RepoError(_("no revision checked out"))
298 raise repo.RepoError(_("no revision checked out"))
299 try:
299 try:
300 return self.changelog.lookup(key)
300 return self.changelog.lookup(key)
301 except:
301 except:
302 raise repo.RepoError(_("unknown revision '%s'") % key)
302 raise repo.RepoError(_("unknown revision '%s'") % key)
303
303
304 def dev(self):
304 def dev(self):
305 return os.lstat(self.path).st_dev
305 return os.lstat(self.path).st_dev
306
306
307 def local(self):
307 def local(self):
308 return True
308 return True
309
309
310 def join(self, f):
310 def join(self, f):
311 return os.path.join(self.path, f)
311 return os.path.join(self.path, f)
312
312
313 def wjoin(self, f):
313 def wjoin(self, f):
314 return os.path.join(self.root, f)
314 return os.path.join(self.root, f)
315
315
316 def file(self, f):
316 def file(self, f):
317 if f[0] == '/':
317 if f[0] == '/':
318 f = f[1:]
318 f = f[1:]
319 return filelog.filelog(self.opener, f, self.revlogversion)
319 return filelog.filelog(self.opener, f, self.revlogversion)
320
320
321 def changectx(self, changeid):
321 def changectx(self, changeid):
322 return context.changectx(self, changeid)
322 return context.changectx(self, changeid)
323
323
324 def filectx(self, path, changeid=None, fileid=None):
324 def filectx(self, path, changeid=None, fileid=None):
325 """changeid can be a changeset revision, node, or tag.
325 """changeid can be a changeset revision, node, or tag.
326 fileid can be a file revision or node."""
326 fileid can be a file revision or node."""
327 return context.filectx(self, path, changeid, fileid)
327 return context.filectx(self, path, changeid, fileid)
328
328
329 def getcwd(self):
329 def getcwd(self):
330 return self.dirstate.getcwd()
330 return self.dirstate.getcwd()
331
331
332 def wfile(self, f, mode='r'):
332 def wfile(self, f, mode='r'):
333 return self.wopener(f, mode)
333 return self.wopener(f, mode)
334
334
335 def wread(self, filename):
335 def wread(self, filename):
336 if self.encodepats == None:
336 if self.encodepats == None:
337 l = []
337 l = []
338 for pat, cmd in self.ui.configitems("encode"):
338 for pat, cmd in self.ui.configitems("encode"):
339 mf = util.matcher(self.root, "", [pat], [], [])[1]
339 mf = util.matcher(self.root, "", [pat], [], [])[1]
340 l.append((mf, cmd))
340 l.append((mf, cmd))
341 self.encodepats = l
341 self.encodepats = l
342
342
343 data = self.wopener(filename, 'r').read()
343 data = self.wopener(filename, 'r').read()
344
344
345 for mf, cmd in self.encodepats:
345 for mf, cmd in self.encodepats:
346 if mf(filename):
346 if mf(filename):
347 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
347 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
348 data = util.filter(data, cmd)
348 data = util.filter(data, cmd)
349 break
349 break
350
350
351 return data
351 return data
352
352
353 def wwrite(self, filename, data, fd=None):
353 def wwrite(self, filename, data, fd=None):
354 if self.decodepats == None:
354 if self.decodepats == None:
355 l = []
355 l = []
356 for pat, cmd in self.ui.configitems("decode"):
356 for pat, cmd in self.ui.configitems("decode"):
357 mf = util.matcher(self.root, "", [pat], [], [])[1]
357 mf = util.matcher(self.root, "", [pat], [], [])[1]
358 l.append((mf, cmd))
358 l.append((mf, cmd))
359 self.decodepats = l
359 self.decodepats = l
360
360
361 for mf, cmd in self.decodepats:
361 for mf, cmd in self.decodepats:
362 if mf(filename):
362 if mf(filename):
363 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
363 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
364 data = util.filter(data, cmd)
364 data = util.filter(data, cmd)
365 break
365 break
366
366
367 if fd:
367 if fd:
368 return fd.write(data)
368 return fd.write(data)
369 return self.wopener(filename, 'w').write(data)
369 return self.wopener(filename, 'w').write(data)
370
370
371 def transaction(self):
371 def transaction(self):
372 tr = self.transhandle
372 tr = self.transhandle
373 if tr != None and tr.running():
373 if tr != None and tr.running():
374 return tr.nest()
374 return tr.nest()
375
375
376 # save dirstate for rollback
376 # save dirstate for rollback
377 try:
377 try:
378 ds = self.opener("dirstate").read()
378 ds = self.opener("dirstate").read()
379 except IOError:
379 except IOError:
380 ds = ""
380 ds = ""
381 self.opener("journal.dirstate", "w").write(ds)
381 self.opener("journal.dirstate", "w").write(ds)
382
382
383 tr = transaction.transaction(self.ui.warn, self.opener,
383 tr = transaction.transaction(self.ui.warn, self.opener,
384 self.join("journal"),
384 self.join("journal"),
385 aftertrans(self.path))
385 aftertrans(self.path))
386 self.transhandle = tr
386 self.transhandle = tr
387 return tr
387 return tr
388
388
389 def recover(self):
389 def recover(self):
390 l = self.lock()
390 l = self.lock()
391 if os.path.exists(self.join("journal")):
391 if os.path.exists(self.join("journal")):
392 self.ui.status(_("rolling back interrupted transaction\n"))
392 self.ui.status(_("rolling back interrupted transaction\n"))
393 transaction.rollback(self.opener, self.join("journal"))
393 transaction.rollback(self.opener, self.join("journal"))
394 self.reload()
394 self.reload()
395 return True
395 return True
396 else:
396 else:
397 self.ui.warn(_("no interrupted transaction available\n"))
397 self.ui.warn(_("no interrupted transaction available\n"))
398 return False
398 return False
399
399
400 def rollback(self, wlock=None):
400 def rollback(self, wlock=None):
401 if not wlock:
401 if not wlock:
402 wlock = self.wlock()
402 wlock = self.wlock()
403 l = self.lock()
403 l = self.lock()
404 if os.path.exists(self.join("undo")):
404 if os.path.exists(self.join("undo")):
405 self.ui.status(_("rolling back last transaction\n"))
405 self.ui.status(_("rolling back last transaction\n"))
406 transaction.rollback(self.opener, self.join("undo"))
406 transaction.rollback(self.opener, self.join("undo"))
407 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
407 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
408 self.reload()
408 self.reload()
409 self.wreload()
409 self.wreload()
410 else:
410 else:
411 self.ui.warn(_("no rollback information available\n"))
411 self.ui.warn(_("no rollback information available\n"))
412
412
413 def wreload(self):
413 def wreload(self):
414 self.dirstate.read()
414 self.dirstate.read()
415
415
416 def reload(self):
416 def reload(self):
417 self.changelog.load()
417 self.changelog.load()
418 self.manifest.load()
418 self.manifest.load()
419 self.tagscache = None
419 self.tagscache = None
420 self.nodetagscache = None
420 self.nodetagscache = None
421
421
422 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
422 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
423 desc=None):
423 desc=None):
424 try:
424 try:
425 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
425 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
426 except lock.LockHeld, inst:
426 except lock.LockHeld, inst:
427 if not wait:
427 if not wait:
428 raise
428 raise
429 self.ui.warn(_("waiting for lock on %s held by %s\n") %
429 self.ui.warn(_("waiting for lock on %s held by %s\n") %
430 (desc, inst.args[0]))
430 (desc, inst.args[0]))
431 # default to 600 seconds timeout
431 # default to 600 seconds timeout
432 l = lock.lock(self.join(lockname),
432 l = lock.lock(self.join(lockname),
433 int(self.ui.config("ui", "timeout") or 600),
433 int(self.ui.config("ui", "timeout") or 600),
434 releasefn, desc=desc)
434 releasefn, desc=desc)
435 if acquirefn:
435 if acquirefn:
436 acquirefn()
436 acquirefn()
437 return l
437 return l
438
438
439 def lock(self, wait=1):
439 def lock(self, wait=1):
440 return self.do_lock("lock", wait, acquirefn=self.reload,
440 return self.do_lock("lock", wait, acquirefn=self.reload,
441 desc=_('repository %s') % self.origroot)
441 desc=_('repository %s') % self.origroot)
442
442
443 def wlock(self, wait=1):
443 def wlock(self, wait=1):
444 return self.do_lock("wlock", wait, self.dirstate.write,
444 return self.do_lock("wlock", wait, self.dirstate.write,
445 self.wreload,
445 self.wreload,
446 desc=_('working directory of %s') % self.origroot)
446 desc=_('working directory of %s') % self.origroot)
447
447
448 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
448 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
449 "determine whether a new filenode is needed"
449 "determine whether a new filenode is needed"
450 fp1 = manifest1.get(filename, nullid)
450 fp1 = manifest1.get(filename, nullid)
451 fp2 = manifest2.get(filename, nullid)
451 fp2 = manifest2.get(filename, nullid)
452
452
453 if fp2 != nullid:
453 if fp2 != nullid:
454 # is one parent an ancestor of the other?
454 # is one parent an ancestor of the other?
455 fpa = filelog.ancestor(fp1, fp2)
455 fpa = filelog.ancestor(fp1, fp2)
456 if fpa == fp1:
456 if fpa == fp1:
457 fp1, fp2 = fp2, nullid
457 fp1, fp2 = fp2, nullid
458 elif fpa == fp2:
458 elif fpa == fp2:
459 fp2 = nullid
459 fp2 = nullid
460
460
461 # is the file unmodified from the parent? report existing entry
461 # is the file unmodified from the parent? report existing entry
462 if fp2 == nullid and text == filelog.read(fp1):
462 if fp2 == nullid and text == filelog.read(fp1):
463 return (fp1, None, None)
463 return (fp1, None, None)
464
464
465 return (None, fp1, fp2)
465 return (None, fp1, fp2)
466
466
467 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
467 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
468 orig_parent = self.dirstate.parents()[0] or nullid
468 orig_parent = self.dirstate.parents()[0] or nullid
469 p1 = p1 or self.dirstate.parents()[0] or nullid
469 p1 = p1 or self.dirstate.parents()[0] or nullid
470 p2 = p2 or self.dirstate.parents()[1] or nullid
470 p2 = p2 or self.dirstate.parents()[1] or nullid
471 c1 = self.changelog.read(p1)
471 c1 = self.changelog.read(p1)
472 c2 = self.changelog.read(p2)
472 c2 = self.changelog.read(p2)
473 m1 = self.manifest.read(c1[0]).copy()
473 m1 = self.manifest.read(c1[0]).copy()
474 m2 = self.manifest.read(c2[0])
474 m2 = self.manifest.read(c2[0])
475 changed = []
475 changed = []
476
476
477 if orig_parent == p1:
477 if orig_parent == p1:
478 update_dirstate = 1
478 update_dirstate = 1
479 else:
479 else:
480 update_dirstate = 0
480 update_dirstate = 0
481
481
482 if not wlock:
482 if not wlock:
483 wlock = self.wlock()
483 wlock = self.wlock()
484 l = self.lock()
484 l = self.lock()
485 tr = self.transaction()
485 tr = self.transaction()
486 linkrev = self.changelog.count()
486 linkrev = self.changelog.count()
487 for f in files:
487 for f in files:
488 try:
488 try:
489 t = self.wread(f)
489 t = self.wread(f)
490 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
490 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
491 r = self.file(f)
491 r = self.file(f)
492
492
493 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
493 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
494 if entry:
494 if entry:
495 m1[f] = entry
495 m1[f] = entry
496 continue
496 continue
497
497
498 m1[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
498 m1[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
499 changed.append(f)
499 changed.append(f)
500 if update_dirstate:
500 if update_dirstate:
501 self.dirstate.update([f], "n")
501 self.dirstate.update([f], "n")
502 except IOError:
502 except IOError:
503 try:
503 try:
504 del m1[f]
504 del m1[f]
505 del m1[f]
505 del m1[f]
506 if update_dirstate:
506 if update_dirstate:
507 self.dirstate.forget([f])
507 self.dirstate.forget([f])
508 except:
508 except:
509 # deleted from p2?
509 # deleted from p2?
510 pass
510 pass
511
511
512 mnode = self.manifest.add(m1, m1, tr, linkrev, c1[0], c2[0])
512 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
513 user = user or self.ui.username()
513 user = user or self.ui.username()
514 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
514 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
515 tr.close()
515 tr.close()
516 if update_dirstate:
516 if update_dirstate:
517 self.dirstate.setparents(n, nullid)
517 self.dirstate.setparents(n, nullid)
518
518
519 def commit(self, files=None, text="", user=None, date=None,
519 def commit(self, files=None, text="", user=None, date=None,
520 match=util.always, force=False, lock=None, wlock=None,
520 match=util.always, force=False, lock=None, wlock=None,
521 force_editor=False):
521 force_editor=False):
522 commit = []
522 commit = []
523 remove = []
523 remove = []
524 changed = []
524 changed = []
525
525
526 if files:
526 if files:
527 for f in files:
527 for f in files:
528 s = self.dirstate.state(f)
528 s = self.dirstate.state(f)
529 if s in 'nmai':
529 if s in 'nmai':
530 commit.append(f)
530 commit.append(f)
531 elif s == 'r':
531 elif s == 'r':
532 remove.append(f)
532 remove.append(f)
533 else:
533 else:
534 self.ui.warn(_("%s not tracked!\n") % f)
534 self.ui.warn(_("%s not tracked!\n") % f)
535 else:
535 else:
536 modified, added, removed, deleted, unknown = self.changes(match=match)
536 modified, added, removed, deleted, unknown = self.changes(match=match)
537 commit = modified + added
537 commit = modified + added
538 remove = removed
538 remove = removed
539
539
540 p1, p2 = self.dirstate.parents()
540 p1, p2 = self.dirstate.parents()
541 c1 = self.changelog.read(p1)
541 c1 = self.changelog.read(p1)
542 c2 = self.changelog.read(p2)
542 c2 = self.changelog.read(p2)
543 m1 = self.manifest.read(c1[0]).copy()
543 m1 = self.manifest.read(c1[0]).copy()
544 m2 = self.manifest.read(c2[0])
544 m2 = self.manifest.read(c2[0])
545
545
546 if not commit and not remove and not force and p2 == nullid:
546 if not commit and not remove and not force and p2 == nullid:
547 self.ui.status(_("nothing changed\n"))
547 self.ui.status(_("nothing changed\n"))
548 return None
548 return None
549
549
550 xp1 = hex(p1)
550 xp1 = hex(p1)
551 if p2 == nullid: xp2 = ''
551 if p2 == nullid: xp2 = ''
552 else: xp2 = hex(p2)
552 else: xp2 = hex(p2)
553
553
554 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
554 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
555
555
556 if not wlock:
556 if not wlock:
557 wlock = self.wlock()
557 wlock = self.wlock()
558 if not lock:
558 if not lock:
559 lock = self.lock()
559 lock = self.lock()
560 tr = self.transaction()
560 tr = self.transaction()
561
561
562 # check in files
562 # check in files
563 new = {}
563 new = {}
564 linkrev = self.changelog.count()
564 linkrev = self.changelog.count()
565 commit.sort()
565 commit.sort()
566 for f in commit:
566 for f in commit:
567 self.ui.note(f + "\n")
567 self.ui.note(f + "\n")
568 try:
568 try:
569 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
569 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
570 t = self.wread(f)
570 t = self.wread(f)
571 except IOError:
571 except IOError:
572 self.ui.warn(_("trouble committing %s!\n") % f)
572 self.ui.warn(_("trouble committing %s!\n") % f)
573 raise
573 raise
574
574
575 r = self.file(f)
575 r = self.file(f)
576
576
577 meta = {}
577 meta = {}
578 cp = self.dirstate.copied(f)
578 cp = self.dirstate.copied(f)
579 if cp:
579 if cp:
580 meta["copy"] = cp
580 meta["copy"] = cp
581 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
581 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
582 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
582 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
583 fp1, fp2 = nullid, nullid
583 fp1, fp2 = nullid, nullid
584 else:
584 else:
585 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
585 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
586 if entry:
586 if entry:
587 new[f] = entry
587 new[f] = entry
588 continue
588 continue
589
589
590 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
590 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
591 # remember what we've added so that we can later calculate
591 # remember what we've added so that we can later calculate
592 # the files to pull from a set of changesets
592 # the files to pull from a set of changesets
593 changed.append(f)
593 changed.append(f)
594
594
595 # update manifest
595 # update manifest
596 m1.update(new)
596 m1.update(new)
597 for f in remove:
597 for f in remove:
598 if f in m1:
598 if f in m1:
599 del m1[f]
599 del m1[f]
600 mn = self.manifest.add(m1, m1, tr, linkrev, c1[0], c2[0],
600 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0],
601 (new, remove))
601 (new, remove))
602
602
603 # add changeset
603 # add changeset
604 new = new.keys()
604 new = new.keys()
605 new.sort()
605 new.sort()
606
606
607 user = user or self.ui.username()
607 user = user or self.ui.username()
608 if not text or force_editor:
608 if not text or force_editor:
609 edittext = []
609 edittext = []
610 if text:
610 if text:
611 edittext.append(text)
611 edittext.append(text)
612 edittext.append("")
612 edittext.append("")
613 if p2 != nullid:
613 if p2 != nullid:
614 edittext.append("HG: branch merge")
614 edittext.append("HG: branch merge")
615 edittext.extend(["HG: changed %s" % f for f in changed])
615 edittext.extend(["HG: changed %s" % f for f in changed])
616 edittext.extend(["HG: removed %s" % f for f in remove])
616 edittext.extend(["HG: removed %s" % f for f in remove])
617 if not changed and not remove:
617 if not changed and not remove:
618 edittext.append("HG: no files changed")
618 edittext.append("HG: no files changed")
619 edittext.append("")
619 edittext.append("")
620 # run editor in the repository root
620 # run editor in the repository root
621 olddir = os.getcwd()
621 olddir = os.getcwd()
622 os.chdir(self.root)
622 os.chdir(self.root)
623 text = self.ui.edit("\n".join(edittext), user)
623 text = self.ui.edit("\n".join(edittext), user)
624 os.chdir(olddir)
624 os.chdir(olddir)
625
625
626 lines = [line.rstrip() for line in text.rstrip().splitlines()]
626 lines = [line.rstrip() for line in text.rstrip().splitlines()]
627 while lines and not lines[0]:
627 while lines and not lines[0]:
628 del lines[0]
628 del lines[0]
629 if not lines:
629 if not lines:
630 return None
630 return None
631 text = '\n'.join(lines)
631 text = '\n'.join(lines)
632 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
632 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
633 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
633 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
634 parent2=xp2)
634 parent2=xp2)
635 tr.close()
635 tr.close()
636
636
637 self.dirstate.setparents(n)
637 self.dirstate.setparents(n)
638 self.dirstate.update(new, "n")
638 self.dirstate.update(new, "n")
639 self.dirstate.forget(remove)
639 self.dirstate.forget(remove)
640
640
641 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
641 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
642 return n
642 return n
643
643
644 def walk(self, node=None, files=[], match=util.always, badmatch=None):
644 def walk(self, node=None, files=[], match=util.always, badmatch=None):
645 if node:
645 if node:
646 fdict = dict.fromkeys(files)
646 fdict = dict.fromkeys(files)
647 for fn in self.manifest.read(self.changelog.read(node)[0]):
647 for fn in self.manifest.read(self.changelog.read(node)[0]):
648 fdict.pop(fn, None)
648 fdict.pop(fn, None)
649 if match(fn):
649 if match(fn):
650 yield 'm', fn
650 yield 'm', fn
651 for fn in fdict:
651 for fn in fdict:
652 if badmatch and badmatch(fn):
652 if badmatch and badmatch(fn):
653 if match(fn):
653 if match(fn):
654 yield 'b', fn
654 yield 'b', fn
655 else:
655 else:
656 self.ui.warn(_('%s: No such file in rev %s\n') % (
656 self.ui.warn(_('%s: No such file in rev %s\n') % (
657 util.pathto(self.getcwd(), fn), short(node)))
657 util.pathto(self.getcwd(), fn), short(node)))
658 else:
658 else:
659 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
659 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
660 yield src, fn
660 yield src, fn
661
661
662 def status(self, node1=None, node2=None, files=[], match=util.always,
662 def status(self, node1=None, node2=None, files=[], match=util.always,
663 wlock=None, list_ignored=False, list_clean=False):
663 wlock=None, list_ignored=False, list_clean=False):
664 """return status of files between two nodes or node and working directory
664 """return status of files between two nodes or node and working directory
665
665
666 If node1 is None, use the first dirstate parent instead.
666 If node1 is None, use the first dirstate parent instead.
667 If node2 is None, compare node1 with working directory.
667 If node2 is None, compare node1 with working directory.
668 """
668 """
669
669
670 def fcmp(fn, mf):
670 def fcmp(fn, mf):
671 t1 = self.wread(fn)
671 t1 = self.wread(fn)
672 t2 = self.file(fn).read(mf.get(fn, nullid))
672 t2 = self.file(fn).read(mf.get(fn, nullid))
673 return cmp(t1, t2)
673 return cmp(t1, t2)
674
674
675 def mfmatches(node):
675 def mfmatches(node):
676 change = self.changelog.read(node)
676 change = self.changelog.read(node)
677 mf = dict(self.manifest.read(change[0]))
677 mf = dict(self.manifest.read(change[0]))
678 for fn in mf.keys():
678 for fn in mf.keys():
679 if not match(fn):
679 if not match(fn):
680 del mf[fn]
680 del mf[fn]
681 return mf
681 return mf
682
682
683 modified, added, removed, deleted, unknown = [], [], [], [], []
683 modified, added, removed, deleted, unknown = [], [], [], [], []
684 ignored, clean = [], []
684 ignored, clean = [], []
685
685
686 compareworking = False
686 compareworking = False
687 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
687 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
688 compareworking = True
688 compareworking = True
689
689
690 if not compareworking:
690 if not compareworking:
691 # read the manifest from node1 before the manifest from node2,
691 # read the manifest from node1 before the manifest from node2,
692 # so that we'll hit the manifest cache if we're going through
692 # so that we'll hit the manifest cache if we're going through
693 # all the revisions in parent->child order.
693 # all the revisions in parent->child order.
694 mf1 = mfmatches(node1)
694 mf1 = mfmatches(node1)
695
695
696 # are we comparing the working directory?
696 # are we comparing the working directory?
697 if not node2:
697 if not node2:
698 if not wlock:
698 if not wlock:
699 try:
699 try:
700 wlock = self.wlock(wait=0)
700 wlock = self.wlock(wait=0)
701 except lock.LockException:
701 except lock.LockException:
702 wlock = None
702 wlock = None
703 (lookup, modified, added, removed, deleted, unknown,
703 (lookup, modified, added, removed, deleted, unknown,
704 ignored, clean) = self.dirstate.status(files, match,
704 ignored, clean) = self.dirstate.status(files, match,
705 list_ignored, list_clean)
705 list_ignored, list_clean)
706
706
707 # are we comparing working dir against its parent?
707 # are we comparing working dir against its parent?
708 if compareworking:
708 if compareworking:
709 if lookup:
709 if lookup:
710 # do a full compare of any files that might have changed
710 # do a full compare of any files that might have changed
711 mf2 = mfmatches(self.dirstate.parents()[0])
711 mf2 = mfmatches(self.dirstate.parents()[0])
712 for f in lookup:
712 for f in lookup:
713 if fcmp(f, mf2):
713 if fcmp(f, mf2):
714 modified.append(f)
714 modified.append(f)
715 elif wlock is not None:
715 elif wlock is not None:
716 self.dirstate.update([f], "n")
716 self.dirstate.update([f], "n")
717 else:
717 else:
718 # we are comparing working dir against non-parent
718 # we are comparing working dir against non-parent
719 # generate a pseudo-manifest for the working dir
719 # generate a pseudo-manifest for the working dir
720 mf2 = mfmatches(self.dirstate.parents()[0])
720 mf2 = mfmatches(self.dirstate.parents()[0])
721 for f in lookup + modified + added:
721 for f in lookup + modified + added:
722 mf2[f] = ""
722 mf2[f] = ""
723 for f in removed:
723 for f in removed:
724 if f in mf2:
724 if f in mf2:
725 del mf2[f]
725 del mf2[f]
726 else:
726 else:
727 # we are comparing two revisions
727 # we are comparing two revisions
728 mf2 = mfmatches(node2)
728 mf2 = mfmatches(node2)
729
729
730 if not compareworking:
730 if not compareworking:
731 # flush lists from dirstate before comparing manifests
731 # flush lists from dirstate before comparing manifests
732 modified, added, clean = [], [], []
732 modified, added, clean = [], [], []
733
733
734 # make sure to sort the files so we talk to the disk in a
734 # make sure to sort the files so we talk to the disk in a
735 # reasonable order
735 # reasonable order
736 mf2keys = mf2.keys()
736 mf2keys = mf2.keys()
737 mf2keys.sort()
737 mf2keys.sort()
738 for fn in mf2keys:
738 for fn in mf2keys:
739 if mf1.has_key(fn):
739 if mf1.has_key(fn):
740 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
740 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
741 modified.append(fn)
741 modified.append(fn)
742 elif list_clean:
742 elif list_clean:
743 clean.append(fn)
743 clean.append(fn)
744 del mf1[fn]
744 del mf1[fn]
745 else:
745 else:
746 added.append(fn)
746 added.append(fn)
747
747
748 removed = mf1.keys()
748 removed = mf1.keys()
749
749
750 # sort and return results:
750 # sort and return results:
751 for l in modified, added, removed, deleted, unknown, ignored, clean:
751 for l in modified, added, removed, deleted, unknown, ignored, clean:
752 l.sort()
752 l.sort()
753 return (modified, added, removed, deleted, unknown, ignored, clean)
753 return (modified, added, removed, deleted, unknown, ignored, clean)
754
754
755 def changes(self, node1=None, node2=None, files=[], match=util.always,
755 def changes(self, node1=None, node2=None, files=[], match=util.always,
756 wlock=None, list_ignored=False, list_clean=False):
756 wlock=None, list_ignored=False, list_clean=False):
757 '''DEPRECATED - use status instead'''
757 '''DEPRECATED - use status instead'''
758 marduit = self.status(node1, node2, files, match, wlock,
758 marduit = self.status(node1, node2, files, match, wlock,
759 list_ignored, list_clean)
759 list_ignored, list_clean)
760 if list_ignored:
760 if list_ignored:
761 return marduit[:-1]
761 return marduit[:-1]
762 else:
762 else:
763 return marduit[:-2]
763 return marduit[:-2]
764
764
765 def add(self, list, wlock=None):
765 def add(self, list, wlock=None):
766 if not wlock:
766 if not wlock:
767 wlock = self.wlock()
767 wlock = self.wlock()
768 for f in list:
768 for f in list:
769 p = self.wjoin(f)
769 p = self.wjoin(f)
770 if not os.path.exists(p):
770 if not os.path.exists(p):
771 self.ui.warn(_("%s does not exist!\n") % f)
771 self.ui.warn(_("%s does not exist!\n") % f)
772 elif not os.path.isfile(p):
772 elif not os.path.isfile(p):
773 self.ui.warn(_("%s not added: only files supported currently\n")
773 self.ui.warn(_("%s not added: only files supported currently\n")
774 % f)
774 % f)
775 elif self.dirstate.state(f) in 'an':
775 elif self.dirstate.state(f) in 'an':
776 self.ui.warn(_("%s already tracked!\n") % f)
776 self.ui.warn(_("%s already tracked!\n") % f)
777 else:
777 else:
778 self.dirstate.update([f], "a")
778 self.dirstate.update([f], "a")
779
779
780 def forget(self, list, wlock=None):
780 def forget(self, list, wlock=None):
781 if not wlock:
781 if not wlock:
782 wlock = self.wlock()
782 wlock = self.wlock()
783 for f in list:
783 for f in list:
784 if self.dirstate.state(f) not in 'ai':
784 if self.dirstate.state(f) not in 'ai':
785 self.ui.warn(_("%s not added!\n") % f)
785 self.ui.warn(_("%s not added!\n") % f)
786 else:
786 else:
787 self.dirstate.forget([f])
787 self.dirstate.forget([f])
788
788
789 def remove(self, list, unlink=False, wlock=None):
789 def remove(self, list, unlink=False, wlock=None):
790 if unlink:
790 if unlink:
791 for f in list:
791 for f in list:
792 try:
792 try:
793 util.unlink(self.wjoin(f))
793 util.unlink(self.wjoin(f))
794 except OSError, inst:
794 except OSError, inst:
795 if inst.errno != errno.ENOENT:
795 if inst.errno != errno.ENOENT:
796 raise
796 raise
797 if not wlock:
797 if not wlock:
798 wlock = self.wlock()
798 wlock = self.wlock()
799 for f in list:
799 for f in list:
800 p = self.wjoin(f)
800 p = self.wjoin(f)
801 if os.path.exists(p):
801 if os.path.exists(p):
802 self.ui.warn(_("%s still exists!\n") % f)
802 self.ui.warn(_("%s still exists!\n") % f)
803 elif self.dirstate.state(f) == 'a':
803 elif self.dirstate.state(f) == 'a':
804 self.dirstate.forget([f])
804 self.dirstate.forget([f])
805 elif f not in self.dirstate:
805 elif f not in self.dirstate:
806 self.ui.warn(_("%s not tracked!\n") % f)
806 self.ui.warn(_("%s not tracked!\n") % f)
807 else:
807 else:
808 self.dirstate.update([f], "r")
808 self.dirstate.update([f], "r")
809
809
810 def undelete(self, list, wlock=None):
810 def undelete(self, list, wlock=None):
811 p = self.dirstate.parents()[0]
811 p = self.dirstate.parents()[0]
812 mn = self.changelog.read(p)[0]
812 mn = self.changelog.read(p)[0]
813 m = self.manifest.read(mn)
813 m = self.manifest.read(mn)
814 if not wlock:
814 if not wlock:
815 wlock = self.wlock()
815 wlock = self.wlock()
816 for f in list:
816 for f in list:
817 if self.dirstate.state(f) not in "r":
817 if self.dirstate.state(f) not in "r":
818 self.ui.warn("%s not removed!\n" % f)
818 self.ui.warn("%s not removed!\n" % f)
819 else:
819 else:
820 t = self.file(f).read(m[f])
820 t = self.file(f).read(m[f])
821 self.wwrite(f, t)
821 self.wwrite(f, t)
822 util.set_exec(self.wjoin(f), m.execf(f))
822 util.set_exec(self.wjoin(f), m.execf(f))
823 self.dirstate.update([f], "n")
823 self.dirstate.update([f], "n")
824
824
825 def copy(self, source, dest, wlock=None):
825 def copy(self, source, dest, wlock=None):
826 p = self.wjoin(dest)
826 p = self.wjoin(dest)
827 if not os.path.exists(p):
827 if not os.path.exists(p):
828 self.ui.warn(_("%s does not exist!\n") % dest)
828 self.ui.warn(_("%s does not exist!\n") % dest)
829 elif not os.path.isfile(p):
829 elif not os.path.isfile(p):
830 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
830 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
831 else:
831 else:
832 if not wlock:
832 if not wlock:
833 wlock = self.wlock()
833 wlock = self.wlock()
834 if self.dirstate.state(dest) == '?':
834 if self.dirstate.state(dest) == '?':
835 self.dirstate.update([dest], "a")
835 self.dirstate.update([dest], "a")
836 self.dirstate.copy(source, dest)
836 self.dirstate.copy(source, dest)
837
837
838 def heads(self, start=None):
838 def heads(self, start=None):
839 heads = self.changelog.heads(start)
839 heads = self.changelog.heads(start)
840 # sort the output in rev descending order
840 # sort the output in rev descending order
841 heads = [(-self.changelog.rev(h), h) for h in heads]
841 heads = [(-self.changelog.rev(h), h) for h in heads]
842 heads.sort()
842 heads.sort()
843 return [n for (r, n) in heads]
843 return [n for (r, n) in heads]
844
844
    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
868 def branchlookup(self, heads=None, branch=None):
868 def branchlookup(self, heads=None, branch=None):
869 if not heads:
869 if not heads:
870 heads = self.heads()
870 heads = self.heads()
871 headt = [ h for h in heads ]
871 headt = [ h for h in heads ]
872 chlog = self.changelog
872 chlog = self.changelog
873 branches = {}
873 branches = {}
874 merges = []
874 merges = []
875 seenmerge = {}
875 seenmerge = {}
876
876
877 # traverse the tree once for each head, recording in the branches
877 # traverse the tree once for each head, recording in the branches
878 # dict which tags are visible from this head. The branches
878 # dict which tags are visible from this head. The branches
879 # dict also records which tags are visible from each tag
879 # dict also records which tags are visible from each tag
880 # while we traverse.
880 # while we traverse.
881 while headt or merges:
881 while headt or merges:
882 if merges:
882 if merges:
883 n, found = merges.pop()
883 n, found = merges.pop()
884 visit = [n]
884 visit = [n]
885 else:
885 else:
886 h = headt.pop()
886 h = headt.pop()
887 visit = [h]
887 visit = [h]
888 found = [h]
888 found = [h]
889 seen = {}
889 seen = {}
890 while visit:
890 while visit:
891 n = visit.pop()
891 n = visit.pop()
892 if n in seen:
892 if n in seen:
893 continue
893 continue
894 pp = chlog.parents(n)
894 pp = chlog.parents(n)
895 tags = self.nodetags(n)
895 tags = self.nodetags(n)
896 if tags:
896 if tags:
897 for x in tags:
897 for x in tags:
898 if x == 'tip':
898 if x == 'tip':
899 continue
899 continue
900 for f in found:
900 for f in found:
901 branches.setdefault(f, {})[n] = 1
901 branches.setdefault(f, {})[n] = 1
902 branches.setdefault(n, {})[n] = 1
902 branches.setdefault(n, {})[n] = 1
903 break
903 break
904 if n not in found:
904 if n not in found:
905 found.append(n)
905 found.append(n)
906 if branch in tags:
906 if branch in tags:
907 continue
907 continue
908 seen[n] = 1
908 seen[n] = 1
909 if pp[1] != nullid and n not in seenmerge:
909 if pp[1] != nullid and n not in seenmerge:
910 merges.append((pp[1], [x for x in found]))
910 merges.append((pp[1], [x for x in found]))
911 seenmerge[n] = 1
911 seenmerge[n] = 1
912 if pp[0] != nullid:
912 if pp[0] != nullid:
913 visit.append(pp[0])
913 visit.append(pp[0])
914 # traverse the branches dict, eliminating branch tags from each
914 # traverse the branches dict, eliminating branch tags from each
915 # head that are visible from another branch tag for that head.
915 # head that are visible from another branch tag for that head.
916 out = {}
916 out = {}
917 viscache = {}
917 viscache = {}
918 for h in heads:
918 for h in heads:
919 def visible(node):
919 def visible(node):
920 if node in viscache:
920 if node in viscache:
921 return viscache[node]
921 return viscache[node]
922 ret = {}
922 ret = {}
923 visit = [node]
923 visit = [node]
924 while visit:
924 while visit:
925 x = visit.pop()
925 x = visit.pop()
926 if x in viscache:
926 if x in viscache:
927 ret.update(viscache[x])
927 ret.update(viscache[x])
928 elif x not in ret:
928 elif x not in ret:
929 ret[x] = 1
929 ret[x] = 1
930 if x in branches:
930 if x in branches:
931 visit[len(visit):] = branches[x].keys()
931 visit[len(visit):] = branches[x].keys()
932 viscache[node] = ret
932 viscache[node] = ret
933 return ret
933 return ret
934 if h not in branches:
934 if h not in branches:
935 continue
935 continue
936 # O(n^2), but somewhat limited. This only searches the
936 # O(n^2), but somewhat limited. This only searches the
937 # tags visible from a specific head, not all the tags in the
937 # tags visible from a specific head, not all the tags in the
938 # whole repo.
938 # whole repo.
939 for b in branches[h]:
939 for b in branches[h]:
940 vis = False
940 vis = False
941 for bb in branches[h].keys():
941 for bb in branches[h].keys():
942 if b != bb:
942 if b != bb:
943 if b in visible(bb):
943 if b in visible(bb):
944 vis = True
944 vis = True
945 break
945 break
946 if not vis:
946 if not vis:
947 l = out.setdefault(h, [])
947 l = out.setdefault(h, [])
948 l[len(l):] = self.nodetags(b)
948 l[len(l):] = self.nodetags(b)
949 return out
949 return out
950
950
951 def branches(self, nodes):
951 def branches(self, nodes):
952 if not nodes:
952 if not nodes:
953 nodes = [self.changelog.tip()]
953 nodes = [self.changelog.tip()]
954 b = []
954 b = []
955 for n in nodes:
955 for n in nodes:
956 t = n
956 t = n
957 while 1:
957 while 1:
958 p = self.changelog.parents(n)
958 p = self.changelog.parents(n)
959 if p[1] != nullid or p[0] == nullid:
959 if p[1] != nullid or p[0] == nullid:
960 b.append((t, n, p[0], p[1]))
960 b.append((t, n, p[0], p[1]))
961 break
961 break
962 n = p[0]
962 n = p[0]
963 return b
963 return b
964
964
965 def between(self, pairs):
965 def between(self, pairs):
966 r = []
966 r = []
967
967
968 for top, bottom in pairs:
968 for top, bottom in pairs:
969 n, l, i = top, [], 0
969 n, l, i = top, [], 0
970 f = 1
970 f = 1
971
971
972 while n != bottom:
972 while n != bottom:
973 p = self.changelog.parents(n)[0]
973 p = self.changelog.parents(n)[0]
974 if i == f:
974 if i == f:
975 l.append(n)
975 l.append(n)
976 f = f * 2
976 f = f * 2
977 n = p
977 n = p
978 i += 1
978 i += 1
979
979
980 r.append(l)
980 r.append(l)
981
981
982 return r
982 return r
983
983
984 def findincoming(self, remote, base=None, heads=None, force=False):
984 def findincoming(self, remote, base=None, heads=None, force=False):
985 """Return list of roots of the subsets of missing nodes from remote
985 """Return list of roots of the subsets of missing nodes from remote
986
986
987 If base dict is specified, assume that these nodes and their parents
987 If base dict is specified, assume that these nodes and their parents
988 exist on the remote side and that no child of a node of base exists
988 exist on the remote side and that no child of a node of base exists
989 in both remote and self.
989 in both remote and self.
990 Furthermore base will be updated to include the nodes that exists
990 Furthermore base will be updated to include the nodes that exists
991 in self and remote but no children exists in self and remote.
991 in self and remote but no children exists in self and remote.
992 If a list of heads is specified, return only nodes which are heads
992 If a list of heads is specified, return only nodes which are heads
993 or ancestors of these heads.
993 or ancestors of these heads.
994
994
995 All the ancestors of base are in self and in remote.
995 All the ancestors of base are in self and in remote.
996 All the descendants of the list returned are missing in self.
996 All the descendants of the list returned are missing in self.
997 (and so we know that the rest of the nodes are missing in remote, see
997 (and so we know that the rest of the nodes are missing in remote, see
998 outgoing)
998 outgoing)
999 """
999 """
1000 m = self.changelog.nodemap
1000 m = self.changelog.nodemap
1001 search = []
1001 search = []
1002 fetch = {}
1002 fetch = {}
1003 seen = {}
1003 seen = {}
1004 seenbranch = {}
1004 seenbranch = {}
1005 if base == None:
1005 if base == None:
1006 base = {}
1006 base = {}
1007
1007
1008 if not heads:
1008 if not heads:
1009 heads = remote.heads()
1009 heads = remote.heads()
1010
1010
1011 if self.changelog.tip() == nullid:
1011 if self.changelog.tip() == nullid:
1012 base[nullid] = 1
1012 base[nullid] = 1
1013 if heads != [nullid]:
1013 if heads != [nullid]:
1014 return [nullid]
1014 return [nullid]
1015 return []
1015 return []
1016
1016
1017 # assume we're closer to the tip than the root
1017 # assume we're closer to the tip than the root
1018 # and start by examining the heads
1018 # and start by examining the heads
1019 self.ui.status(_("searching for changes\n"))
1019 self.ui.status(_("searching for changes\n"))
1020
1020
1021 unknown = []
1021 unknown = []
1022 for h in heads:
1022 for h in heads:
1023 if h not in m:
1023 if h not in m:
1024 unknown.append(h)
1024 unknown.append(h)
1025 else:
1025 else:
1026 base[h] = 1
1026 base[h] = 1
1027
1027
1028 if not unknown:
1028 if not unknown:
1029 return []
1029 return []
1030
1030
1031 req = dict.fromkeys(unknown)
1031 req = dict.fromkeys(unknown)
1032 reqcnt = 0
1032 reqcnt = 0
1033
1033
1034 # search through remote branches
1034 # search through remote branches
1035 # a 'branch' here is a linear segment of history, with four parts:
1035 # a 'branch' here is a linear segment of history, with four parts:
1036 # head, root, first parent, second parent
1036 # head, root, first parent, second parent
1037 # (a branch always has two parents (or none) by definition)
1037 # (a branch always has two parents (or none) by definition)
1038 unknown = remote.branches(unknown)
1038 unknown = remote.branches(unknown)
1039 while unknown:
1039 while unknown:
1040 r = []
1040 r = []
1041 while unknown:
1041 while unknown:
1042 n = unknown.pop(0)
1042 n = unknown.pop(0)
1043 if n[0] in seen:
1043 if n[0] in seen:
1044 continue
1044 continue
1045
1045
1046 self.ui.debug(_("examining %s:%s\n")
1046 self.ui.debug(_("examining %s:%s\n")
1047 % (short(n[0]), short(n[1])))
1047 % (short(n[0]), short(n[1])))
1048 if n[0] == nullid: # found the end of the branch
1048 if n[0] == nullid: # found the end of the branch
1049 pass
1049 pass
1050 elif n in seenbranch:
1050 elif n in seenbranch:
1051 self.ui.debug(_("branch already found\n"))
1051 self.ui.debug(_("branch already found\n"))
1052 continue
1052 continue
1053 elif n[1] and n[1] in m: # do we know the base?
1053 elif n[1] and n[1] in m: # do we know the base?
1054 self.ui.debug(_("found incomplete branch %s:%s\n")
1054 self.ui.debug(_("found incomplete branch %s:%s\n")
1055 % (short(n[0]), short(n[1])))
1055 % (short(n[0]), short(n[1])))
1056 search.append(n) # schedule branch range for scanning
1056 search.append(n) # schedule branch range for scanning
1057 seenbranch[n] = 1
1057 seenbranch[n] = 1
1058 else:
1058 else:
1059 if n[1] not in seen and n[1] not in fetch:
1059 if n[1] not in seen and n[1] not in fetch:
1060 if n[2] in m and n[3] in m:
1060 if n[2] in m and n[3] in m:
1061 self.ui.debug(_("found new changeset %s\n") %
1061 self.ui.debug(_("found new changeset %s\n") %
1062 short(n[1]))
1062 short(n[1]))
1063 fetch[n[1]] = 1 # earliest unknown
1063 fetch[n[1]] = 1 # earliest unknown
1064 for p in n[2:4]:
1064 for p in n[2:4]:
1065 if p in m:
1065 if p in m:
1066 base[p] = 1 # latest known
1066 base[p] = 1 # latest known
1067
1067
1068 for p in n[2:4]:
1068 for p in n[2:4]:
1069 if p not in req and p not in m:
1069 if p not in req and p not in m:
1070 r.append(p)
1070 r.append(p)
1071 req[p] = 1
1071 req[p] = 1
1072 seen[n[0]] = 1
1072 seen[n[0]] = 1
1073
1073
1074 if r:
1074 if r:
1075 reqcnt += 1
1075 reqcnt += 1
1076 self.ui.debug(_("request %d: %s\n") %
1076 self.ui.debug(_("request %d: %s\n") %
1077 (reqcnt, " ".join(map(short, r))))
1077 (reqcnt, " ".join(map(short, r))))
1078 for p in range(0, len(r), 10):
1078 for p in range(0, len(r), 10):
1079 for b in remote.branches(r[p:p+10]):
1079 for b in remote.branches(r[p:p+10]):
1080 self.ui.debug(_("received %s:%s\n") %
1080 self.ui.debug(_("received %s:%s\n") %
1081 (short(b[0]), short(b[1])))
1081 (short(b[0]), short(b[1])))
1082 unknown.append(b)
1082 unknown.append(b)
1083
1083
1084 # do binary search on the branches we found
1084 # do binary search on the branches we found
1085 while search:
1085 while search:
1086 n = search.pop(0)
1086 n = search.pop(0)
1087 reqcnt += 1
1087 reqcnt += 1
1088 l = remote.between([(n[0], n[1])])[0]
1088 l = remote.between([(n[0], n[1])])[0]
1089 l.append(n[1])
1089 l.append(n[1])
1090 p = n[0]
1090 p = n[0]
1091 f = 1
1091 f = 1
1092 for i in l:
1092 for i in l:
1093 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1093 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1094 if i in m:
1094 if i in m:
1095 if f <= 2:
1095 if f <= 2:
1096 self.ui.debug(_("found new branch changeset %s\n") %
1096 self.ui.debug(_("found new branch changeset %s\n") %
1097 short(p))
1097 short(p))
1098 fetch[p] = 1
1098 fetch[p] = 1
1099 base[i] = 1
1099 base[i] = 1
1100 else:
1100 else:
1101 self.ui.debug(_("narrowed branch search to %s:%s\n")
1101 self.ui.debug(_("narrowed branch search to %s:%s\n")
1102 % (short(p), short(i)))
1102 % (short(p), short(i)))
1103 search.append((p, i))
1103 search.append((p, i))
1104 break
1104 break
1105 p, f = i, f * 2
1105 p, f = i, f * 2
1106
1106
1107 # sanity check our fetch list
1107 # sanity check our fetch list
1108 for f in fetch.keys():
1108 for f in fetch.keys():
1109 if f in m:
1109 if f in m:
1110 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1110 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1111
1111
1112 if base.keys() == [nullid]:
1112 if base.keys() == [nullid]:
1113 if force:
1113 if force:
1114 self.ui.warn(_("warning: repository is unrelated\n"))
1114 self.ui.warn(_("warning: repository is unrelated\n"))
1115 else:
1115 else:
1116 raise util.Abort(_("repository is unrelated"))
1116 raise util.Abort(_("repository is unrelated"))
1117
1117
1118 self.ui.note(_("found new changesets starting at ") +
1118 self.ui.note(_("found new changesets starting at ") +
1119 " ".join([short(f) for f in fetch]) + "\n")
1119 " ".join([short(f) for f in fetch]) + "\n")
1120
1120
1121 self.ui.debug(_("%d total queries\n") % reqcnt)
1121 self.ui.debug(_("%d total queries\n") % reqcnt)
1122
1122
1123 return fetch.keys()
1123 return fetch.keys()
1124
1124
1125 def findoutgoing(self, remote, base=None, heads=None, force=False):
1125 def findoutgoing(self, remote, base=None, heads=None, force=False):
1126 """Return list of nodes that are roots of subsets not in remote
1126 """Return list of nodes that are roots of subsets not in remote
1127
1127
1128 If base dict is specified, assume that these nodes and their parents
1128 If base dict is specified, assume that these nodes and their parents
1129 exist on the remote side.
1129 exist on the remote side.
1130 If a list of heads is specified, return only nodes which are heads
1130 If a list of heads is specified, return only nodes which are heads
1131 or ancestors of these heads, and return a second element which
1131 or ancestors of these heads, and return a second element which
1132 contains all remote heads which get new children.
1132 contains all remote heads which get new children.
1133 """
1133 """
1134 if base == None:
1134 if base == None:
1135 base = {}
1135 base = {}
1136 self.findincoming(remote, base, heads, force=force)
1136 self.findincoming(remote, base, heads, force=force)
1137
1137
1138 self.ui.debug(_("common changesets up to ")
1138 self.ui.debug(_("common changesets up to ")
1139 + " ".join(map(short, base.keys())) + "\n")
1139 + " ".join(map(short, base.keys())) + "\n")
1140
1140
1141 remain = dict.fromkeys(self.changelog.nodemap)
1141 remain = dict.fromkeys(self.changelog.nodemap)
1142
1142
1143 # prune everything remote has from the tree
1143 # prune everything remote has from the tree
1144 del remain[nullid]
1144 del remain[nullid]
1145 remove = base.keys()
1145 remove = base.keys()
1146 while remove:
1146 while remove:
1147 n = remove.pop(0)
1147 n = remove.pop(0)
1148 if n in remain:
1148 if n in remain:
1149 del remain[n]
1149 del remain[n]
1150 for p in self.changelog.parents(n):
1150 for p in self.changelog.parents(n):
1151 remove.append(p)
1151 remove.append(p)
1152
1152
1153 # find every node whose parents have been pruned
1153 # find every node whose parents have been pruned
1154 subset = []
1154 subset = []
1155 # find every remote head that will get new children
1155 # find every remote head that will get new children
1156 updated_heads = {}
1156 updated_heads = {}
1157 for n in remain:
1157 for n in remain:
1158 p1, p2 = self.changelog.parents(n)
1158 p1, p2 = self.changelog.parents(n)
1159 if p1 not in remain and p2 not in remain:
1159 if p1 not in remain and p2 not in remain:
1160 subset.append(n)
1160 subset.append(n)
1161 if heads:
1161 if heads:
1162 if p1 in heads:
1162 if p1 in heads:
1163 updated_heads[p1] = True
1163 updated_heads[p1] = True
1164 if p2 in heads:
1164 if p2 in heads:
1165 updated_heads[p2] = True
1165 updated_heads[p2] = True
1166
1166
1167 # this is the set of all roots we have to push
1167 # this is the set of all roots we have to push
1168 if heads:
1168 if heads:
1169 return subset, updated_heads.keys()
1169 return subset, updated_heads.keys()
1170 else:
1170 else:
1171 return subset
1171 return subset
1172
1172
1173 def pull(self, remote, heads=None, force=False, lock=None):
1173 def pull(self, remote, heads=None, force=False, lock=None):
1174 mylock = False
1174 mylock = False
1175 if not lock:
1175 if not lock:
1176 lock = self.lock()
1176 lock = self.lock()
1177 mylock = True
1177 mylock = True
1178
1178
1179 try:
1179 try:
1180 fetch = self.findincoming(remote, force=force)
1180 fetch = self.findincoming(remote, force=force)
1181 if fetch == [nullid]:
1181 if fetch == [nullid]:
1182 self.ui.status(_("requesting all changes\n"))
1182 self.ui.status(_("requesting all changes\n"))
1183
1183
1184 if not fetch:
1184 if not fetch:
1185 self.ui.status(_("no changes found\n"))
1185 self.ui.status(_("no changes found\n"))
1186 return 0
1186 return 0
1187
1187
1188 if heads is None:
1188 if heads is None:
1189 cg = remote.changegroup(fetch, 'pull')
1189 cg = remote.changegroup(fetch, 'pull')
1190 else:
1190 else:
1191 cg = remote.changegroupsubset(fetch, heads, 'pull')
1191 cg = remote.changegroupsubset(fetch, heads, 'pull')
1192 return self.addchangegroup(cg, 'pull', remote.url())
1192 return self.addchangegroup(cg, 'pull', remote.url())
1193 finally:
1193 finally:
1194 if mylock:
1194 if mylock:
1195 lock.release()
1195 lock.release()
1196
1196
1197 def push(self, remote, force=False, revs=None):
1197 def push(self, remote, force=False, revs=None):
1198 # there are two ways to push to remote repo:
1198 # there are two ways to push to remote repo:
1199 #
1199 #
1200 # addchangegroup assumes local user can lock remote
1200 # addchangegroup assumes local user can lock remote
1201 # repo (local filesystem, old ssh servers).
1201 # repo (local filesystem, old ssh servers).
1202 #
1202 #
1203 # unbundle assumes local user cannot lock remote repo (new ssh
1203 # unbundle assumes local user cannot lock remote repo (new ssh
1204 # servers, http servers).
1204 # servers, http servers).
1205
1205
1206 if remote.capable('unbundle'):
1206 if remote.capable('unbundle'):
1207 return self.push_unbundle(remote, force, revs)
1207 return self.push_unbundle(remote, force, revs)
1208 return self.push_addchangegroup(remote, force, revs)
1208 return self.push_addchangegroup(remote, force, revs)
1209
1209
1210 def prepush(self, remote, force, revs):
1210 def prepush(self, remote, force, revs):
1211 base = {}
1211 base = {}
1212 remote_heads = remote.heads()
1212 remote_heads = remote.heads()
1213 inc = self.findincoming(remote, base, remote_heads, force=force)
1213 inc = self.findincoming(remote, base, remote_heads, force=force)
1214 if not force and inc:
1214 if not force and inc:
1215 self.ui.warn(_("abort: unsynced remote changes!\n"))
1215 self.ui.warn(_("abort: unsynced remote changes!\n"))
1216 self.ui.status(_("(did you forget to sync?"
1216 self.ui.status(_("(did you forget to sync?"
1217 " use push -f to force)\n"))
1217 " use push -f to force)\n"))
1218 return None, 1
1218 return None, 1
1219
1219
1220 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1220 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1221 if revs is not None:
1221 if revs is not None:
1222 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1222 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1223 else:
1223 else:
1224 bases, heads = update, self.changelog.heads()
1224 bases, heads = update, self.changelog.heads()
1225
1225
1226 if not bases:
1226 if not bases:
1227 self.ui.status(_("no changes found\n"))
1227 self.ui.status(_("no changes found\n"))
1228 return None, 1
1228 return None, 1
1229 elif not force:
1229 elif not force:
1230 # FIXME we don't properly detect creation of new heads
1230 # FIXME we don't properly detect creation of new heads
1231 # in the push -r case, assume the user knows what he's doing
1231 # in the push -r case, assume the user knows what he's doing
1232 if not revs and len(remote_heads) < len(heads) \
1232 if not revs and len(remote_heads) < len(heads) \
1233 and remote_heads != [nullid]:
1233 and remote_heads != [nullid]:
1234 self.ui.warn(_("abort: push creates new remote branches!\n"))
1234 self.ui.warn(_("abort: push creates new remote branches!\n"))
1235 self.ui.status(_("(did you forget to merge?"
1235 self.ui.status(_("(did you forget to merge?"
1236 " use push -f to force)\n"))
1236 " use push -f to force)\n"))
1237 return None, 1
1237 return None, 1
1238
1238
1239 if revs is None:
1239 if revs is None:
1240 cg = self.changegroup(update, 'push')
1240 cg = self.changegroup(update, 'push')
1241 else:
1241 else:
1242 cg = self.changegroupsubset(update, revs, 'push')
1242 cg = self.changegroupsubset(update, revs, 'push')
1243 return cg, remote_heads
1243 return cg, remote_heads
1244
1244
1245 def push_addchangegroup(self, remote, force, revs):
1245 def push_addchangegroup(self, remote, force, revs):
1246 lock = remote.lock()
1246 lock = remote.lock()
1247
1247
1248 ret = self.prepush(remote, force, revs)
1248 ret = self.prepush(remote, force, revs)
1249 if ret[0] is not None:
1249 if ret[0] is not None:
1250 cg, remote_heads = ret
1250 cg, remote_heads = ret
1251 return remote.addchangegroup(cg, 'push', self.url())
1251 return remote.addchangegroup(cg, 'push', self.url())
1252 return ret[1]
1252 return ret[1]
1253
1253
1254 def push_unbundle(self, remote, force, revs):
1254 def push_unbundle(self, remote, force, revs):
1255 # local repo finds heads on server, finds out what revs it
1255 # local repo finds heads on server, finds out what revs it
1256 # must push. once revs transferred, if server finds it has
1256 # must push. once revs transferred, if server finds it has
1257 # different heads (someone else won commit/push race), server
1257 # different heads (someone else won commit/push race), server
1258 # aborts.
1258 # aborts.
1259
1259
1260 ret = self.prepush(remote, force, revs)
1260 ret = self.prepush(remote, force, revs)
1261 if ret[0] is not None:
1261 if ret[0] is not None:
1262 cg, remote_heads = ret
1262 cg, remote_heads = ret
1263 if force: remote_heads = ['force']
1263 if force: remote_heads = ['force']
1264 return remote.unbundle(cg, remote_heads, 'push')
1264 return remote.unbundle(cg, remote_heads, 'push')
1265 return ret[1]
1265 return ret[1]
1266
1266
1267 def changegroupsubset(self, bases, heads, source):
1267 def changegroupsubset(self, bases, heads, source):
1268 """This function generates a changegroup consisting of all the nodes
1268 """This function generates a changegroup consisting of all the nodes
1269 that are descendents of any of the bases, and ancestors of any of
1269 that are descendents of any of the bases, and ancestors of any of
1270 the heads.
1270 the heads.
1271
1271
1272 It is fairly complex as determining which filenodes and which
1272 It is fairly complex as determining which filenodes and which
1273 manifest nodes need to be included for the changeset to be complete
1273 manifest nodes need to be included for the changeset to be complete
1274 is non-trivial.
1274 is non-trivial.
1275
1275
1276 Another wrinkle is doing the reverse, figuring out which changeset in
1276 Another wrinkle is doing the reverse, figuring out which changeset in
1277 the changegroup a particular filenode or manifestnode belongs to."""
1277 the changegroup a particular filenode or manifestnode belongs to."""
1278
1278
1279 self.hook('preoutgoing', throw=True, source=source)
1279 self.hook('preoutgoing', throw=True, source=source)
1280
1280
1281 # Set up some initial variables
1281 # Set up some initial variables
1282 # Make it easy to refer to self.changelog
1282 # Make it easy to refer to self.changelog
1283 cl = self.changelog
1283 cl = self.changelog
1284 # msng is short for missing - compute the list of changesets in this
1284 # msng is short for missing - compute the list of changesets in this
1285 # changegroup.
1285 # changegroup.
1286 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1286 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1287 # Some bases may turn out to be superfluous, and some heads may be
1287 # Some bases may turn out to be superfluous, and some heads may be
1288 # too. nodesbetween will return the minimal set of bases and heads
1288 # too. nodesbetween will return the minimal set of bases and heads
1289 # necessary to re-create the changegroup.
1289 # necessary to re-create the changegroup.
1290
1290
1291 # Known heads are the list of heads that it is assumed the recipient
1291 # Known heads are the list of heads that it is assumed the recipient
1292 # of this changegroup will know about.
1292 # of this changegroup will know about.
1293 knownheads = {}
1293 knownheads = {}
1294 # We assume that all parents of bases are known heads.
1294 # We assume that all parents of bases are known heads.
1295 for n in bases:
1295 for n in bases:
1296 for p in cl.parents(n):
1296 for p in cl.parents(n):
1297 if p != nullid:
1297 if p != nullid:
1298 knownheads[p] = 1
1298 knownheads[p] = 1
1299 knownheads = knownheads.keys()
1299 knownheads = knownheads.keys()
1300 if knownheads:
1300 if knownheads:
1301 # Now that we know what heads are known, we can compute which
1301 # Now that we know what heads are known, we can compute which
1302 # changesets are known. The recipient must know about all
1302 # changesets are known. The recipient must know about all
1303 # changesets required to reach the known heads from the null
1303 # changesets required to reach the known heads from the null
1304 # changeset.
1304 # changeset.
1305 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1305 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1306 junk = None
1306 junk = None
1307 # Transform the list into an ersatz set.
1307 # Transform the list into an ersatz set.
1308 has_cl_set = dict.fromkeys(has_cl_set)
1308 has_cl_set = dict.fromkeys(has_cl_set)
1309 else:
1309 else:
1310 # If there were no known heads, the recipient cannot be assumed to
1310 # If there were no known heads, the recipient cannot be assumed to
1311 # know about any changesets.
1311 # know about any changesets.
1312 has_cl_set = {}
1312 has_cl_set = {}
1313
1313
1314 # Make it easy to refer to self.manifest
1314 # Make it easy to refer to self.manifest
1315 mnfst = self.manifest
1315 mnfst = self.manifest
1316 # We don't know which manifests are missing yet
1316 # We don't know which manifests are missing yet
1317 msng_mnfst_set = {}
1317 msng_mnfst_set = {}
1318 # Nor do we know which filenodes are missing.
1318 # Nor do we know which filenodes are missing.
1319 msng_filenode_set = {}
1319 msng_filenode_set = {}
1320
1320
1321 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1321 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1322 junk = None
1322 junk = None
1323
1323
1324 # A changeset always belongs to itself, so the changenode lookup
1324 # A changeset always belongs to itself, so the changenode lookup
1325 # function for a changenode is identity.
1325 # function for a changenode is identity.
1326 def identity(x):
1326 def identity(x):
1327 return x
1327 return x
1328
1328
1329 # A function generating function. Sets up an environment for the
1329 # A function generating function. Sets up an environment for the
1330 # inner function.
1330 # inner function.
1331 def cmp_by_rev_func(revlog):
1331 def cmp_by_rev_func(revlog):
1332 # Compare two nodes by their revision number in the environment's
1332 # Compare two nodes by their revision number in the environment's
1333 # revision history. Since the revision number both represents the
1333 # revision history. Since the revision number both represents the
1334 # most efficient order to read the nodes in, and represents a
1334 # most efficient order to read the nodes in, and represents a
1335 # topological sorting of the nodes, this function is often useful.
1335 # topological sorting of the nodes, this function is often useful.
1336 def cmp_by_rev(a, b):
1336 def cmp_by_rev(a, b):
1337 return cmp(revlog.rev(a), revlog.rev(b))
1337 return cmp(revlog.rev(a), revlog.rev(b))
1338 return cmp_by_rev
1338 return cmp_by_rev
1339
1339
1340 # If we determine that a particular file or manifest node must be a
1340 # If we determine that a particular file or manifest node must be a
1341 # node that the recipient of the changegroup will already have, we can
1341 # node that the recipient of the changegroup will already have, we can
1342 # also assume the recipient will have all the parents. This function
1342 # also assume the recipient will have all the parents. This function
1343 # prunes them from the set of missing nodes.
1343 # prunes them from the set of missing nodes.
1344 def prune_parents(revlog, hasset, msngset):
1344 def prune_parents(revlog, hasset, msngset):
1345 haslst = hasset.keys()
1345 haslst = hasset.keys()
1346 haslst.sort(cmp_by_rev_func(revlog))
1346 haslst.sort(cmp_by_rev_func(revlog))
1347 for node in haslst:
1347 for node in haslst:
1348 parentlst = [p for p in revlog.parents(node) if p != nullid]
1348 parentlst = [p for p in revlog.parents(node) if p != nullid]
1349 while parentlst:
1349 while parentlst:
1350 n = parentlst.pop()
1350 n = parentlst.pop()
1351 if n not in hasset:
1351 if n not in hasset:
1352 hasset[n] = 1
1352 hasset[n] = 1
1353 p = [p for p in revlog.parents(n) if p != nullid]
1353 p = [p for p in revlog.parents(n) if p != nullid]
1354 parentlst.extend(p)
1354 parentlst.extend(p)
1355 for n in hasset:
1355 for n in hasset:
1356 msngset.pop(n, None)
1356 msngset.pop(n, None)
1357
1357
1358 # This is a function generating function used to set up an environment
1358 # This is a function generating function used to set up an environment
1359 # for the inner function to execute in.
1359 # for the inner function to execute in.
1360 def manifest_and_file_collector(changedfileset):
1360 def manifest_and_file_collector(changedfileset):
1361 # This is an information gathering function that gathers
1361 # This is an information gathering function that gathers
1362 # information from each changeset node that goes out as part of
1362 # information from each changeset node that goes out as part of
1363 # the changegroup. The information gathered is a list of which
1363 # the changegroup. The information gathered is a list of which
1364 # manifest nodes are potentially required (the recipient may
1364 # manifest nodes are potentially required (the recipient may
1365 # already have them) and total list of all files which were
1365 # already have them) and total list of all files which were
1366 # changed in any changeset in the changegroup.
1366 # changed in any changeset in the changegroup.
1367 #
1367 #
1368 # We also remember the first changenode we saw any manifest
1368 # We also remember the first changenode we saw any manifest
1369 # referenced by so we can later determine which changenode 'owns'
1369 # referenced by so we can later determine which changenode 'owns'
1370 # the manifest.
1370 # the manifest.
1371 def collect_manifests_and_files(clnode):
1371 def collect_manifests_and_files(clnode):
1372 c = cl.read(clnode)
1372 c = cl.read(clnode)
1373 for f in c[3]:
1373 for f in c[3]:
1374 # This is to make sure we only have one instance of each
1374 # This is to make sure we only have one instance of each
1375 # filename string for each filename.
1375 # filename string for each filename.
1376 changedfileset.setdefault(f, f)
1376 changedfileset.setdefault(f, f)
1377 msng_mnfst_set.setdefault(c[0], clnode)
1377 msng_mnfst_set.setdefault(c[0], clnode)
1378 return collect_manifests_and_files
1378 return collect_manifests_and_files
1379
1379
1380 # Figure out which manifest nodes (of the ones we think might be part
1380 # Figure out which manifest nodes (of the ones we think might be part
1381 # of the changegroup) the recipient must know about and remove them
1381 # of the changegroup) the recipient must know about and remove them
1382 # from the changegroup.
1382 # from the changegroup.
1383 def prune_manifests():
1383 def prune_manifests():
1384 has_mnfst_set = {}
1384 has_mnfst_set = {}
1385 for n in msng_mnfst_set:
1385 for n in msng_mnfst_set:
1386 # If a 'missing' manifest thinks it belongs to a changenode
1386 # If a 'missing' manifest thinks it belongs to a changenode
1387 # the recipient is assumed to have, obviously the recipient
1387 # the recipient is assumed to have, obviously the recipient
1388 # must have that manifest.
1388 # must have that manifest.
1389 linknode = cl.node(mnfst.linkrev(n))
1389 linknode = cl.node(mnfst.linkrev(n))
1390 if linknode in has_cl_set:
1390 if linknode in has_cl_set:
1391 has_mnfst_set[n] = 1
1391 has_mnfst_set[n] = 1
1392 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1392 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1393
1393
1394 # Use the information collected in collect_manifests_and_files to say
1394 # Use the information collected in collect_manifests_and_files to say
1395 # which changenode any manifestnode belongs to.
1395 # which changenode any manifestnode belongs to.
1396 def lookup_manifest_link(mnfstnode):
1396 def lookup_manifest_link(mnfstnode):
1397 return msng_mnfst_set[mnfstnode]
1397 return msng_mnfst_set[mnfstnode]
1398
1398
1399 # A function generating function that sets up the initial environment
1399 # A function generating function that sets up the initial environment
1400 # the inner function.
1400 # the inner function.
1401 def filenode_collector(changedfiles):
1401 def filenode_collector(changedfiles):
1402 next_rev = [0]
1402 next_rev = [0]
1403 # This gathers information from each manifestnode included in the
1403 # This gathers information from each manifestnode included in the
1404 # changegroup about which filenodes the manifest node references
1404 # changegroup about which filenodes the manifest node references
1405 # so we can include those in the changegroup too.
1405 # so we can include those in the changegroup too.
1406 #
1406 #
1407 # It also remembers which changenode each filenode belongs to. It
1407 # It also remembers which changenode each filenode belongs to. It
1408 # does this by assuming the a filenode belongs to the changenode
1408 # does this by assuming the a filenode belongs to the changenode
1409 # the first manifest that references it belongs to.
1409 # the first manifest that references it belongs to.
1410 def collect_msng_filenodes(mnfstnode):
1410 def collect_msng_filenodes(mnfstnode):
1411 r = mnfst.rev(mnfstnode)
1411 r = mnfst.rev(mnfstnode)
1412 if r == next_rev[0]:
1412 if r == next_rev[0]:
1413 # If the last rev we looked at was the one just previous,
1413 # If the last rev we looked at was the one just previous,
1414 # we only need to see a diff.
1414 # we only need to see a diff.
1415 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1415 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1416 # For each line in the delta
1416 # For each line in the delta
1417 for dline in delta.splitlines():
1417 for dline in delta.splitlines():
1418 # get the filename and filenode for that line
1418 # get the filename and filenode for that line
1419 f, fnode = dline.split('\0')
1419 f, fnode = dline.split('\0')
1420 fnode = bin(fnode[:40])
1420 fnode = bin(fnode[:40])
1421 f = changedfiles.get(f, None)
1421 f = changedfiles.get(f, None)
1422 # And if the file is in the list of files we care
1422 # And if the file is in the list of files we care
1423 # about.
1423 # about.
1424 if f is not None:
1424 if f is not None:
1425 # Get the changenode this manifest belongs to
1425 # Get the changenode this manifest belongs to
1426 clnode = msng_mnfst_set[mnfstnode]
1426 clnode = msng_mnfst_set[mnfstnode]
1427 # Create the set of filenodes for the file if
1427 # Create the set of filenodes for the file if
1428 # there isn't one already.
1428 # there isn't one already.
1429 ndset = msng_filenode_set.setdefault(f, {})
1429 ndset = msng_filenode_set.setdefault(f, {})
1430 # And set the filenode's changelog node to the
1430 # And set the filenode's changelog node to the
1431 # manifest's if it hasn't been set already.
1431 # manifest's if it hasn't been set already.
1432 ndset.setdefault(fnode, clnode)
1432 ndset.setdefault(fnode, clnode)
1433 else:
1433 else:
1434 # Otherwise we need a full manifest.
1434 # Otherwise we need a full manifest.
1435 m = mnfst.read(mnfstnode)
1435 m = mnfst.read(mnfstnode)
1436 # For every file in we care about.
1436 # For every file in we care about.
1437 for f in changedfiles:
1437 for f in changedfiles:
1438 fnode = m.get(f, None)
1438 fnode = m.get(f, None)
1439 # If it's in the manifest
1439 # If it's in the manifest
1440 if fnode is not None:
1440 if fnode is not None:
1441 # See comments above.
1441 # See comments above.
1442 clnode = msng_mnfst_set[mnfstnode]
1442 clnode = msng_mnfst_set[mnfstnode]
1443 ndset = msng_filenode_set.setdefault(f, {})
1443 ndset = msng_filenode_set.setdefault(f, {})
1444 ndset.setdefault(fnode, clnode)
1444 ndset.setdefault(fnode, clnode)
1445 # Remember the revision we hope to see next.
1445 # Remember the revision we hope to see next.
1446 next_rev[0] = r + 1
1446 next_rev[0] = r + 1
1447 return collect_msng_filenodes
1447 return collect_msng_filenodes
1448
1448
1449 # We have a list of filenodes we think we need for a file, lets remove
1449 # We have a list of filenodes we think we need for a file, lets remove
1450 # all those we now the recipient must have.
1450 # all those we now the recipient must have.
1451 def prune_filenodes(f, filerevlog):
1451 def prune_filenodes(f, filerevlog):
1452 msngset = msng_filenode_set[f]
1452 msngset = msng_filenode_set[f]
1453 hasset = {}
1453 hasset = {}
1454 # If a 'missing' filenode thinks it belongs to a changenode we
1454 # If a 'missing' filenode thinks it belongs to a changenode we
1455 # assume the recipient must have, then the recipient must have
1455 # assume the recipient must have, then the recipient must have
1456 # that filenode.
1456 # that filenode.
1457 for n in msngset:
1457 for n in msngset:
1458 clnode = cl.node(filerevlog.linkrev(n))
1458 clnode = cl.node(filerevlog.linkrev(n))
1459 if clnode in has_cl_set:
1459 if clnode in has_cl_set:
1460 hasset[n] = 1
1460 hasset[n] = 1
1461 prune_parents(filerevlog, hasset, msngset)
1461 prune_parents(filerevlog, hasset, msngset)
1462
1462
1463 # A function generator function that sets up the a context for the
1463 # A function generator function that sets up the a context for the
1464 # inner function.
1464 # inner function.
1465 def lookup_filenode_link_func(fname):
1465 def lookup_filenode_link_func(fname):
1466 msngset = msng_filenode_set[fname]
1466 msngset = msng_filenode_set[fname]
1467 # Lookup the changenode the filenode belongs to.
1467 # Lookup the changenode the filenode belongs to.
1468 def lookup_filenode_link(fnode):
1468 def lookup_filenode_link(fnode):
1469 return msngset[fnode]
1469 return msngset[fnode]
1470 return lookup_filenode_link
1470 return lookup_filenode_link
1471
1471
1472 # Now that we have all theses utility functions to help out and
1472 # Now that we have all theses utility functions to help out and
1473 # logically divide up the task, generate the group.
1473 # logically divide up the task, generate the group.
1474 def gengroup():
1474 def gengroup():
1475 # The set of changed files starts empty.
1475 # The set of changed files starts empty.
1476 changedfiles = {}
1476 changedfiles = {}
1477 # Create a changenode group generator that will call our functions
1477 # Create a changenode group generator that will call our functions
1478 # back to lookup the owning changenode and collect information.
1478 # back to lookup the owning changenode and collect information.
1479 group = cl.group(msng_cl_lst, identity,
1479 group = cl.group(msng_cl_lst, identity,
1480 manifest_and_file_collector(changedfiles))
1480 manifest_and_file_collector(changedfiles))
1481 for chnk in group:
1481 for chnk in group:
1482 yield chnk
1482 yield chnk
1483
1483
1484 # The list of manifests has been collected by the generator
1484 # The list of manifests has been collected by the generator
1485 # calling our functions back.
1485 # calling our functions back.
1486 prune_manifests()
1486 prune_manifests()
1487 msng_mnfst_lst = msng_mnfst_set.keys()
1487 msng_mnfst_lst = msng_mnfst_set.keys()
1488 # Sort the manifestnodes by revision number.
1488 # Sort the manifestnodes by revision number.
1489 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1489 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1490 # Create a generator for the manifestnodes that calls our lookup
1490 # Create a generator for the manifestnodes that calls our lookup
1491 # and data collection functions back.
1491 # and data collection functions back.
1492 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1492 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1493 filenode_collector(changedfiles))
1493 filenode_collector(changedfiles))
1494 for chnk in group:
1494 for chnk in group:
1495 yield chnk
1495 yield chnk
1496
1496
1497 # These are no longer needed, dereference and toss the memory for
1497 # These are no longer needed, dereference and toss the memory for
1498 # them.
1498 # them.
1499 msng_mnfst_lst = None
1499 msng_mnfst_lst = None
1500 msng_mnfst_set.clear()
1500 msng_mnfst_set.clear()
1501
1501
1502 changedfiles = changedfiles.keys()
1502 changedfiles = changedfiles.keys()
1503 changedfiles.sort()
1503 changedfiles.sort()
1504 # Go through all our files in order sorted by name.
1504 # Go through all our files in order sorted by name.
1505 for fname in changedfiles:
1505 for fname in changedfiles:
1506 filerevlog = self.file(fname)
1506 filerevlog = self.file(fname)
1507 # Toss out the filenodes that the recipient isn't really
1507 # Toss out the filenodes that the recipient isn't really
1508 # missing.
1508 # missing.
1509 if msng_filenode_set.has_key(fname):
1509 if msng_filenode_set.has_key(fname):
1510 prune_filenodes(fname, filerevlog)
1510 prune_filenodes(fname, filerevlog)
1511 msng_filenode_lst = msng_filenode_set[fname].keys()
1511 msng_filenode_lst = msng_filenode_set[fname].keys()
1512 else:
1512 else:
1513 msng_filenode_lst = []
1513 msng_filenode_lst = []
1514 # If any filenodes are left, generate the group for them,
1514 # If any filenodes are left, generate the group for them,
1515 # otherwise don't bother.
1515 # otherwise don't bother.
1516 if len(msng_filenode_lst) > 0:
1516 if len(msng_filenode_lst) > 0:
1517 yield changegroup.genchunk(fname)
1517 yield changegroup.genchunk(fname)
1518 # Sort the filenodes by their revision #
1518 # Sort the filenodes by their revision #
1519 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1519 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1520 # Create a group generator and only pass in a changenode
1520 # Create a group generator and only pass in a changenode
1521 # lookup function as we need to collect no information
1521 # lookup function as we need to collect no information
1522 # from filenodes.
1522 # from filenodes.
1523 group = filerevlog.group(msng_filenode_lst,
1523 group = filerevlog.group(msng_filenode_lst,
1524 lookup_filenode_link_func(fname))
1524 lookup_filenode_link_func(fname))
1525 for chnk in group:
1525 for chnk in group:
1526 yield chnk
1526 yield chnk
1527 if msng_filenode_set.has_key(fname):
1527 if msng_filenode_set.has_key(fname):
1528 # Don't need this anymore, toss it to free memory.
1528 # Don't need this anymore, toss it to free memory.
1529 del msng_filenode_set[fname]
1529 del msng_filenode_set[fname]
1530 # Signal that no more groups are left.
1530 # Signal that no more groups are left.
1531 yield changegroup.closechunk()
1531 yield changegroup.closechunk()
1532
1532
1533 if msng_cl_lst:
1533 if msng_cl_lst:
1534 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1534 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1535
1535
1536 return util.chunkbuffer(gengroup())
1536 return util.chunkbuffer(gengroup())
1537
1537
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # nodesbetween returns (nodes, outroots, outheads); only the full
        # list of missing changelog nodes is needed here
        nodes = cl.nodesbetween(basenodes, None)[0]
        # set of outgoing changelog revision numbers, used for fast
        # membership tests in gennodelst below
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            # changelog entries link to themselves
            return x

        def gennodelst(revlog):
            # yield every node of this revlog whose linked changeset is
            # part of the outgoing set
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # build a lookup callback that, as a side effect, records every
            # file touched by each changeset as it is serialized
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                # c[3] is the list of files changed by this changeset
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a manifest/filelog node back to the changelog node that
            # introduced it
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changelog group first; collecting changed files on the way
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # then the manifest group
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # finally one group per changed file, each announced by a
            # filename chunk; files with no outgoing revisions are skipped
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # signal that no more groups are left
            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1603
1603
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source is a chunked stream as produced by changegroup();
        srctype/url describe where it came from and are passed to hooks."""

        def csmap(x):
            # lookup callback for changelog.addgroup: incoming changesets
            # simply take the next revision number
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # map a changelog node to its revision number for linkrevs
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # an empty chunk terminates the list of file groups
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            # always discard the appendfile scaffolding, even on error
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may still abort the transaction
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # fire one incoming hook per added changeset
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
1699
1699
1700
1700
    def stream_in(self, remote):
        """Clone by copying the remote's store files verbatim.

        Much faster than pulling a changegroup for a full clone; requires
        the server to allow uncompressed streaming.  Returns number of
        heads + 1 (same convention as addchangegroup)."""
        fp = remote.stream_out()
        # first line of the stream is a status code; nonzero means refused
        resp = int(fp.readline())
        if resp != 0:
            raise util.Abort(_('operation forbidden by server'))
        self.ui.status(_('streaming all changes\n'))
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # each file is announced as "<name>\0<size>\n" followed by
            # exactly <size> raw bytes
            name, size = fp.readline().split('\0', 1)
            size = int(size)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.opener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        # NOTE(review): elapsed can be 0 on a very coarse clock, which
        # would make the rate computation divide by zero — confirm
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        # reopen revlogs so the newly written store is visible
        self.reload()
        return len(self.heads()) + 1
1725
1725
1726 def clone(self, remote, heads=[], stream=False):
1726 def clone(self, remote, heads=[], stream=False):
1727 '''clone remote repository.
1727 '''clone remote repository.
1728
1728
1729 keyword arguments:
1729 keyword arguments:
1730 heads: list of revs to clone (forces use of pull)
1730 heads: list of revs to clone (forces use of pull)
1731 stream: use streaming clone if possible'''
1731 stream: use streaming clone if possible'''
1732
1732
1733 # now, all clients that can request uncompressed clones can
1733 # now, all clients that can request uncompressed clones can
1734 # read repo formats supported by all servers that can serve
1734 # read repo formats supported by all servers that can serve
1735 # them.
1735 # them.
1736
1736
1737 # if revlog format changes, client will have to check version
1737 # if revlog format changes, client will have to check version
1738 # and format flags on "stream" capability, and use
1738 # and format flags on "stream" capability, and use
1739 # uncompressed only if compatible.
1739 # uncompressed only if compatible.
1740
1740
1741 if stream and not heads and remote.capable('stream'):
1741 if stream and not heads and remote.capable('stream'):
1742 return self.stream_in(remote)
1742 return self.stream_in(remote)
1743 return self.pull(remote, heads)
1743 return self.pull(remote, heads)
1744
1744
1745 # used to avoid circular references so destructors work
1745 # used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callable that renames the journal files under *base* to
    their "undo" names after a transaction completes.

    Implemented as a plain closure over the path (rather than a bound
    method) to avoid circular references so destructors work."""
    journal_dir = base
    def rename_journal():
        # order matters: the changelog journal first, then the dirstate
        for old_name, new_name in (("journal", "undo"),
                                   ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(journal_dir, old_name),
                        os.path.join(journal_dir, new_name))
    return rename_journal
1753
1753
def instance(ui, path, create):
    # repo-factory entry point: strip a leading "file:" scheme and
    # open (or create) the repository at the resulting local path
    return localrepository(ui, util.drop_scheme('file', path), create)
1756
1756
def islocal(path):
    """Report whether *path* names a local repository.

    This module only ever handles local repositories, so the answer is
    unconditionally True (the argument is part of the repo-type API)."""
    return True
@@ -1,202 +1,199 b''
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from revlog import *
8 from revlog import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 demandload(globals(), "array bisect struct")
11 demandload(globals(), "array bisect struct")
12
12
class manifestdict(dict):
    """Mapping of file name -> binary node id, with per-file flag strings
    ("x" for executable, "l" for symlink) kept in a side table."""
    def __init__(self, mapping=None, flags=None):
        # bug fix: the previous mutable default arguments (mapping={},
        # flags={}) meant every instance created without explicit flags
        # aliased ONE shared dict, which set()/rawset() then mutated for
        # all such instances
        dict.__init__(self, mapping if mapping is not None else {})
        self._flags = flags if flags is not None else {}
    def flags(self, f):
        "return the flag string for f (empty string when unset)"
        return self._flags.get(f, "")
    def execf(self, f):
        "test for executable in manifest flags"
        return "x" in self.flags(f)
    def linkf(self, f):
        "test for symlink in manifest flags"
        return "l" in self.flags(f)
    def rawset(self, f, entry):
        # entry is "<40 hex chars><flags>\n": store the binary node and
        # any trailing flag characters
        self[f] = bin(entry[:40])
        fl = entry[40:-1]
        if fl: self._flags[f] = fl
    def set(self, f, execf=False, linkf=False):
        if execf: self._flags[f] = "x"
        # bug fix: symlinks must be recorded as "l", not "x" — otherwise
        # linkf() (which tests for "l") can never see them and they are
        # indistinguishable from executables
        if linkf: self._flags[f] = "l"
    def copy(self):
        return manifestdict(dict.copy(self), dict.copy(self._flags))
34
34
class manifest(revlog):
    def __init__(self, opener, defversion=REVLOGV0):
        # caches of the most recently read manifest:
        # mapcache is (node, manifestdict); listcache holds the raw
        # manifest text as a char array so add() can edit it in place
        self.mapcache = None
        self.listcache = None
        revlog.__init__(self, opener, "00manifest.i", "00manifest.d",
                        defversion)

    def read(self, node):
        """Return the manifestdict for a given manifest node."""
        if node == nullid: return manifestdict() # don't upset local cache
        if self.mapcache and self.mapcache[0] == node:
            return self.mapcache[1]
        text = self.revision(node)
        # keep the raw text around for delta generation in add()
        self.listcache = array.array('c', text)
        lines = text.splitlines(1)
        mapping = manifestdict()
        for l in lines:
            # each line is "<file>\0<40 hex node><flags>\n"
            (f, n) = l.split('\0')
            mapping.rawset(f, n)
        self.mapcache = (node, mapping)
        return mapping

    def diff(self, a, b):
        # textual delta between two raw manifest texts
        return mdiff.textdiff(str(a), str(b))

    def _search(self, m, s, lo=0, hi=None):
        '''return a tuple (start, end) that says where to find s within m.

        If the string is found m[start:end] are the line containing
        that string.  If start == end the string was not found and
        they indicate the proper sorted insertion point.  This was
        taken from bisect_left, and modified to find line start/end as
        it goes along.

        m should be a buffer or a string
        s is a string'''
        def advance(i, c):
            # move i forward to the next occurrence of character c
            while i < lenm and m[i] != c:
                i += 1
            return i
        lenm = len(m)
        if not hi:
            hi = lenm
        while lo < hi:
            mid = (lo + hi) // 2
            start = mid
            # back up to the beginning of the manifest line containing mid
            while start > 0 and m[start-1] != '\n':
                start -= 1
            end = advance(start, '\0')
            if m[start:end] < s:
                # we know that after the null there are 40 bytes of sha1
                # this translates to the bisect lo = mid + 1
                lo = advance(end + 40, '\n') + 1
            else:
                # this translates to the bisect hi = mid
                hi = start
        end = advance(lo, '\0')
        found = m[lo:end]
        if cmp(s, found) == 0:
            # we know that after the null there are 40 bytes of sha1
            end = advance(end + 40, '\n')
            return (lo, end+1)
        else:
            return (lo, lo)

    def find(self, node, f):
        '''look up entry for a single file efficiently.
        return (node, flag) pair if found, (None, None) if not.'''
        if self.mapcache and node == self.mapcache[0]:
            return self.mapcache[1].get(f), self.mapcache[1].flags(f)
        text = self.revision(node)
        start, end = self._search(text, f)
        if start == end:
            return None, None
        l = text[start:end]
        f, n = l.split('\0')
        # n is "<40 hex chars><flags>\n"; flag is reported as an exec bool
        return bin(n[:40]), n[40:-1] == 'x'

    def add(self, map, transaction, link, p1=None, p2=None,
            changed=None):
        """Add a new manifest revision built from *map* (a manifestdict).

        When *changed* (added-files list, removed-files list) and a valid
        listcache for p1 are available, the new text is produced by
        patching the cached text in place instead of rebuilding it."""
        # apply the changes collected during the bisect loop to our addlist
        # return a delta suitable for addrevision
        def addlistdelta(addlist, x):
            # start from the bottom up
            # so changes to the offsets don't mess things up.
            i = len(x)
            while i > 0:
                i -= 1
                start = x[i][0]
                end = x[i][1]
                if x[i][2]:
                    addlist[start:end] = array.array('c', x[i][2])
                else:
                    del addlist[start:end]
            return "".join([struct.pack(">lll", d[0], d[1], len(d[2])) + d[2] \
                            for d in x ])

        # if we're using the listcache, make sure it is valid and
        # parented by the same node we're diffing against
        if not changed or not self.listcache or not p1 or \
               self.mapcache[0] != p1:
            # slow path: rebuild the whole manifest text from scratch
            files = map.keys()
            files.sort()

            # if this is changed to support newlines in filenames,
            # be sure to check the templates/ dir again (especially *-raw.tmpl)
            text = ["%s\000%s%s\n" % (f, hex(map[f]), map.flags(f)) for f in files]
            self.listcache = array.array('c', "".join(text))
            cachedelta = None
        else:
            addlist = self.listcache

            # combine the changed lists into one list for sorting
            # (second element: 0 = add/update, 1 = delete)
            work = [[x, 0] for x in changed[0]]
            work[len(work):] = [[x, 1] for x in changed[1]]
            work.sort()

            delta = []
            dstart = None
            dend = None
            dline = [""]
            start = 0
            # zero copy representation of addlist as a buffer
            addbuf = buffer(addlist)

            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for w in work:
                f = w[0]
                # bs will either be the index of the item or the insert point
                start, end = self._search(addbuf, f, start)
                if w[1] == 0:
                    l = "%s\000%s%s\n" % (f, hex(map[f]), map.flags(f))
                else:
                    l = ""
                if start == end and w[1] == 1:
                    # item we want to delete was not found, error out
                    raise AssertionError(
                            _("failed to remove %s from manifest\n") % f)
                # merge adjacent/overlapping edits into one delta hunk
                if dstart != None and dstart <= start and dend >= start:
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart != None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart != None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the addlist, and get a delta for addrevision
            cachedelta = addlistdelta(addlist, delta)

            # the delta is only valid if we've been processing the tip revision
            if self.mapcache[0] != self.tip():
                cachedelta = None
            self.listcache = addlist

        n = self.addrevision(buffer(self.listcache), transaction, link, p1, \
                             p2, cachedelta)
        self.mapcache = (n, map)

        return n
General Comments 0
You need to be logged in to leave comments. Login now