##// END OF EJS Templates
Add localrepo.parents to get parent changectxs.
Matt Mackall -
r3163:1605e336 default
parent child Browse files
Show More
@@ -1,1752 +1,1763 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ()
18 capabilities = ()
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("There is no Mercurial repository"
30 raise repo.RepoError(_("There is no Mercurial repository"
31 " here (.hg not found)"))
31 " here (.hg not found)"))
32 path = p
32 path = p
33 self.path = os.path.join(path, ".hg")
33 self.path = os.path.join(path, ".hg")
34
34
35 if not os.path.isdir(self.path):
35 if not os.path.isdir(self.path):
36 if create:
36 if create:
37 if not os.path.exists(path):
37 if not os.path.exists(path):
38 os.mkdir(path)
38 os.mkdir(path)
39 os.mkdir(self.path)
39 os.mkdir(self.path)
40 os.mkdir(self.join("data"))
40 os.mkdir(self.join("data"))
41 else:
41 else:
42 raise repo.RepoError(_("repository %s not found") % path)
42 raise repo.RepoError(_("repository %s not found") % path)
43 elif create:
43 elif create:
44 raise repo.RepoError(_("repository %s already exists") % path)
44 raise repo.RepoError(_("repository %s already exists") % path)
45
45
46 self.root = os.path.abspath(path)
46 self.root = os.path.abspath(path)
47 self.origroot = path
47 self.origroot = path
48 self.ui = ui.ui(parentui=parentui)
48 self.ui = ui.ui(parentui=parentui)
49 self.opener = util.opener(self.path)
49 self.opener = util.opener(self.path)
50 self.wopener = util.opener(self.root)
50 self.wopener = util.opener(self.root)
51
51
52 try:
52 try:
53 self.ui.readconfig(self.join("hgrc"), self.root)
53 self.ui.readconfig(self.join("hgrc"), self.root)
54 except IOError:
54 except IOError:
55 pass
55 pass
56
56
57 v = self.ui.revlogopts
57 v = self.ui.revlogopts
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 fl = v.get('flags', None)
60 fl = v.get('flags', None)
61 flags = 0
61 flags = 0
62 if fl != None:
62 if fl != None:
63 for x in fl.split():
63 for x in fl.split():
64 flags |= revlog.flagstr(x)
64 flags |= revlog.flagstr(x)
65 elif self.revlogv1:
65 elif self.revlogv1:
66 flags = revlog.REVLOG_DEFAULT_FLAGS
66 flags = revlog.REVLOG_DEFAULT_FLAGS
67
67
68 v = self.revlogversion | flags
68 v = self.revlogversion | flags
69 self.manifest = manifest.manifest(self.opener, v)
69 self.manifest = manifest.manifest(self.opener, v)
70 self.changelog = changelog.changelog(self.opener, v)
70 self.changelog = changelog.changelog(self.opener, v)
71
71
72 # the changelog might not have the inline index flag
72 # the changelog might not have the inline index flag
73 # on. If the format of the changelog is the same as found in
73 # on. If the format of the changelog is the same as found in
74 # .hgrc, apply any flags found in the .hgrc as well.
74 # .hgrc, apply any flags found in the .hgrc as well.
75 # Otherwise, just version from the changelog
75 # Otherwise, just version from the changelog
76 v = self.changelog.version
76 v = self.changelog.version
77 if v == self.revlogversion:
77 if v == self.revlogversion:
78 v |= flags
78 v |= flags
79 self.revlogversion = v
79 self.revlogversion = v
80
80
81 self.tagscache = None
81 self.tagscache = None
82 self.nodetagscache = None
82 self.nodetagscache = None
83 self.encodepats = None
83 self.encodepats = None
84 self.decodepats = None
84 self.decodepats = None
85 self.transhandle = None
85 self.transhandle = None
86
86
87 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
87 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
88
88
89 def url(self):
89 def url(self):
90 return 'file:' + self.root
90 return 'file:' + self.root
91
91
92 def hook(self, name, throw=False, **args):
92 def hook(self, name, throw=False, **args):
93 def callhook(hname, funcname):
93 def callhook(hname, funcname):
94 '''call python hook. hook is callable object, looked up as
94 '''call python hook. hook is callable object, looked up as
95 name in python module. if callable returns "true", hook
95 name in python module. if callable returns "true", hook
96 fails, else passes. if hook raises exception, treated as
96 fails, else passes. if hook raises exception, treated as
97 hook failure. exception propagates if throw is "true".
97 hook failure. exception propagates if throw is "true".
98
98
99 reason for "true" meaning "hook failed" is so that
99 reason for "true" meaning "hook failed" is so that
100 unmodified commands (e.g. mercurial.commands.update) can
100 unmodified commands (e.g. mercurial.commands.update) can
101 be run as hooks without wrappers to convert return values.'''
101 be run as hooks without wrappers to convert return values.'''
102
102
103 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
103 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
104 d = funcname.rfind('.')
104 d = funcname.rfind('.')
105 if d == -1:
105 if d == -1:
106 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
106 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
107 % (hname, funcname))
107 % (hname, funcname))
108 modname = funcname[:d]
108 modname = funcname[:d]
109 try:
109 try:
110 obj = __import__(modname)
110 obj = __import__(modname)
111 except ImportError:
111 except ImportError:
112 try:
112 try:
113 # extensions are loaded with hgext_ prefix
113 # extensions are loaded with hgext_ prefix
114 obj = __import__("hgext_%s" % modname)
114 obj = __import__("hgext_%s" % modname)
115 except ImportError:
115 except ImportError:
116 raise util.Abort(_('%s hook is invalid '
116 raise util.Abort(_('%s hook is invalid '
117 '(import of "%s" failed)') %
117 '(import of "%s" failed)') %
118 (hname, modname))
118 (hname, modname))
119 try:
119 try:
120 for p in funcname.split('.')[1:]:
120 for p in funcname.split('.')[1:]:
121 obj = getattr(obj, p)
121 obj = getattr(obj, p)
122 except AttributeError, err:
122 except AttributeError, err:
123 raise util.Abort(_('%s hook is invalid '
123 raise util.Abort(_('%s hook is invalid '
124 '("%s" is not defined)') %
124 '("%s" is not defined)') %
125 (hname, funcname))
125 (hname, funcname))
126 if not callable(obj):
126 if not callable(obj):
127 raise util.Abort(_('%s hook is invalid '
127 raise util.Abort(_('%s hook is invalid '
128 '("%s" is not callable)') %
128 '("%s" is not callable)') %
129 (hname, funcname))
129 (hname, funcname))
130 try:
130 try:
131 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
131 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
132 except (KeyboardInterrupt, util.SignalInterrupt):
132 except (KeyboardInterrupt, util.SignalInterrupt):
133 raise
133 raise
134 except Exception, exc:
134 except Exception, exc:
135 if isinstance(exc, util.Abort):
135 if isinstance(exc, util.Abort):
136 self.ui.warn(_('error: %s hook failed: %s\n') %
136 self.ui.warn(_('error: %s hook failed: %s\n') %
137 (hname, exc.args[0]))
137 (hname, exc.args[0]))
138 else:
138 else:
139 self.ui.warn(_('error: %s hook raised an exception: '
139 self.ui.warn(_('error: %s hook raised an exception: '
140 '%s\n') % (hname, exc))
140 '%s\n') % (hname, exc))
141 if throw:
141 if throw:
142 raise
142 raise
143 self.ui.print_exc()
143 self.ui.print_exc()
144 return True
144 return True
145 if r:
145 if r:
146 if throw:
146 if throw:
147 raise util.Abort(_('%s hook failed') % hname)
147 raise util.Abort(_('%s hook failed') % hname)
148 self.ui.warn(_('warning: %s hook failed\n') % hname)
148 self.ui.warn(_('warning: %s hook failed\n') % hname)
149 return r
149 return r
150
150
151 def runhook(name, cmd):
151 def runhook(name, cmd):
152 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
152 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
153 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
153 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
154 r = util.system(cmd, environ=env, cwd=self.root)
154 r = util.system(cmd, environ=env, cwd=self.root)
155 if r:
155 if r:
156 desc, r = util.explain_exit(r)
156 desc, r = util.explain_exit(r)
157 if throw:
157 if throw:
158 raise util.Abort(_('%s hook %s') % (name, desc))
158 raise util.Abort(_('%s hook %s') % (name, desc))
159 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
159 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
160 return r
160 return r
161
161
162 r = False
162 r = False
163 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
163 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
164 if hname.split(".", 1)[0] == name and cmd]
164 if hname.split(".", 1)[0] == name and cmd]
165 hooks.sort()
165 hooks.sort()
166 for hname, cmd in hooks:
166 for hname, cmd in hooks:
167 if cmd.startswith('python:'):
167 if cmd.startswith('python:'):
168 r = callhook(hname, cmd[7:].strip()) or r
168 r = callhook(hname, cmd[7:].strip()) or r
169 else:
169 else:
170 r = runhook(hname, cmd) or r
170 r = runhook(hname, cmd) or r
171 return r
171 return r
172
172
173 tag_disallowed = ':\r\n'
173 tag_disallowed = ':\r\n'
174
174
175 def tag(self, name, node, message, local, user, date):
175 def tag(self, name, node, message, local, user, date):
176 '''tag a revision with a symbolic name.
176 '''tag a revision with a symbolic name.
177
177
178 if local is True, the tag is stored in a per-repository file.
178 if local is True, the tag is stored in a per-repository file.
179 otherwise, it is stored in the .hgtags file, and a new
179 otherwise, it is stored in the .hgtags file, and a new
180 changeset is committed with the change.
180 changeset is committed with the change.
181
181
182 keyword arguments:
182 keyword arguments:
183
183
184 local: whether to store tag in non-version-controlled file
184 local: whether to store tag in non-version-controlled file
185 (default False)
185 (default False)
186
186
187 message: commit message to use if committing
187 message: commit message to use if committing
188
188
189 user: name of user to use if committing
189 user: name of user to use if committing
190
190
191 date: date tuple to use if committing'''
191 date: date tuple to use if committing'''
192
192
193 for c in self.tag_disallowed:
193 for c in self.tag_disallowed:
194 if c in name:
194 if c in name:
195 raise util.Abort(_('%r cannot be used in a tag name') % c)
195 raise util.Abort(_('%r cannot be used in a tag name') % c)
196
196
197 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
197 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
198
198
199 if local:
199 if local:
200 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
200 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
201 self.hook('tag', node=hex(node), tag=name, local=local)
201 self.hook('tag', node=hex(node), tag=name, local=local)
202 return
202 return
203
203
204 for x in self.status()[:5]:
204 for x in self.status()[:5]:
205 if '.hgtags' in x:
205 if '.hgtags' in x:
206 raise util.Abort(_('working copy of .hgtags is changed '
206 raise util.Abort(_('working copy of .hgtags is changed '
207 '(please commit .hgtags manually)'))
207 '(please commit .hgtags manually)'))
208
208
209 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
209 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
210 if self.dirstate.state('.hgtags') == '?':
210 if self.dirstate.state('.hgtags') == '?':
211 self.add(['.hgtags'])
211 self.add(['.hgtags'])
212
212
213 self.commit(['.hgtags'], message, user, date)
213 self.commit(['.hgtags'], message, user, date)
214 self.hook('tag', node=hex(node), tag=name, local=local)
214 self.hook('tag', node=hex(node), tag=name, local=local)
215
215
216 def tags(self):
216 def tags(self):
217 '''return a mapping of tag to node'''
217 '''return a mapping of tag to node'''
218 if not self.tagscache:
218 if not self.tagscache:
219 self.tagscache = {}
219 self.tagscache = {}
220
220
221 def parsetag(line, context):
221 def parsetag(line, context):
222 if not line:
222 if not line:
223 return
223 return
224 s = l.split(" ", 1)
224 s = l.split(" ", 1)
225 if len(s) != 2:
225 if len(s) != 2:
226 self.ui.warn(_("%s: cannot parse entry\n") % context)
226 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 return
227 return
228 node, key = s
228 node, key = s
229 key = key.strip()
229 key = key.strip()
230 try:
230 try:
231 bin_n = bin(node)
231 bin_n = bin(node)
232 except TypeError:
232 except TypeError:
233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 (context, node))
234 (context, node))
235 return
235 return
236 if bin_n not in self.changelog.nodemap:
236 if bin_n not in self.changelog.nodemap:
237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 (context, key))
238 (context, key))
239 return
239 return
240 self.tagscache[key] = bin_n
240 self.tagscache[key] = bin_n
241
241
242 # read the tags file from each head, ending with the tip,
242 # read the tags file from each head, ending with the tip,
243 # and add each tag found to the map, with "newer" ones
243 # and add each tag found to the map, with "newer" ones
244 # taking precedence
244 # taking precedence
245 heads = self.heads()
245 heads = self.heads()
246 heads.reverse()
246 heads.reverse()
247 fl = self.file(".hgtags")
247 fl = self.file(".hgtags")
248 for node in heads:
248 for node in heads:
249 change = self.changelog.read(node)
249 change = self.changelog.read(node)
250 rev = self.changelog.rev(node)
250 rev = self.changelog.rev(node)
251 fn, ff = self.manifest.find(change[0], '.hgtags')
251 fn, ff = self.manifest.find(change[0], '.hgtags')
252 if fn is None: continue
252 if fn is None: continue
253 count = 0
253 count = 0
254 for l in fl.read(fn).splitlines():
254 for l in fl.read(fn).splitlines():
255 count += 1
255 count += 1
256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 (rev, short(node), count))
257 (rev, short(node), count))
258 try:
258 try:
259 f = self.opener("localtags")
259 f = self.opener("localtags")
260 count = 0
260 count = 0
261 for l in f:
261 for l in f:
262 count += 1
262 count += 1
263 parsetag(l, _("localtags, line %d") % count)
263 parsetag(l, _("localtags, line %d") % count)
264 except IOError:
264 except IOError:
265 pass
265 pass
266
266
267 self.tagscache['tip'] = self.changelog.tip()
267 self.tagscache['tip'] = self.changelog.tip()
268
268
269 return self.tagscache
269 return self.tagscache
270
270
271 def tagslist(self):
271 def tagslist(self):
272 '''return a list of tags ordered by revision'''
272 '''return a list of tags ordered by revision'''
273 l = []
273 l = []
274 for t, n in self.tags().items():
274 for t, n in self.tags().items():
275 try:
275 try:
276 r = self.changelog.rev(n)
276 r = self.changelog.rev(n)
277 except:
277 except:
278 r = -2 # sort to the beginning of the list if unknown
278 r = -2 # sort to the beginning of the list if unknown
279 l.append((r, t, n))
279 l.append((r, t, n))
280 l.sort()
280 l.sort()
281 return [(t, n) for r, t, n in l]
281 return [(t, n) for r, t, n in l]
282
282
283 def nodetags(self, node):
283 def nodetags(self, node):
284 '''return the tags associated with a node'''
284 '''return the tags associated with a node'''
285 if not self.nodetagscache:
285 if not self.nodetagscache:
286 self.nodetagscache = {}
286 self.nodetagscache = {}
287 for t, n in self.tags().items():
287 for t, n in self.tags().items():
288 self.nodetagscache.setdefault(n, []).append(t)
288 self.nodetagscache.setdefault(n, []).append(t)
289 return self.nodetagscache.get(node, [])
289 return self.nodetagscache.get(node, [])
290
290
291 def lookup(self, key):
291 def lookup(self, key):
292 try:
292 try:
293 return self.tags()[key]
293 return self.tags()[key]
294 except KeyError:
294 except KeyError:
295 if key == '.':
295 if key == '.':
296 key = self.dirstate.parents()[0]
296 key = self.dirstate.parents()[0]
297 if key == nullid:
297 if key == nullid:
298 raise repo.RepoError(_("no revision checked out"))
298 raise repo.RepoError(_("no revision checked out"))
299 try:
299 try:
300 return self.changelog.lookup(key)
300 return self.changelog.lookup(key)
301 except:
301 except:
302 raise repo.RepoError(_("unknown revision '%s'") % key)
302 raise repo.RepoError(_("unknown revision '%s'") % key)
303
303
304 def dev(self):
304 def dev(self):
305 return os.lstat(self.path).st_dev
305 return os.lstat(self.path).st_dev
306
306
307 def local(self):
307 def local(self):
308 return True
308 return True
309
309
310 def join(self, f):
310 def join(self, f):
311 return os.path.join(self.path, f)
311 return os.path.join(self.path, f)
312
312
313 def wjoin(self, f):
313 def wjoin(self, f):
314 return os.path.join(self.root, f)
314 return os.path.join(self.root, f)
315
315
316 def file(self, f):
316 def file(self, f):
317 if f[0] == '/':
317 if f[0] == '/':
318 f = f[1:]
318 f = f[1:]
319 return filelog.filelog(self.opener, f, self.revlogversion)
319 return filelog.filelog(self.opener, f, self.revlogversion)
320
320
321 def changectx(self, changeid=None):
321 def changectx(self, changeid=None):
322 return context.changectx(self, changeid)
322 return context.changectx(self, changeid)
323
323
324 def parents(self, changeid=None):
325 '''
326 get list of changectxs for parents of changeid or working directory
327 '''
328 if changeid is None:
329 pl = self.dirstate.parents()
330 else:
331 n = self.changelog.lookup(changeid)
332 pl = self.changelog.parents(n)
333 return [self.changectx(n) for n in pl if n != nullid]
334
324 def filectx(self, path, changeid=None, fileid=None):
335 def filectx(self, path, changeid=None, fileid=None):
325 """changeid can be a changeset revision, node, or tag.
336 """changeid can be a changeset revision, node, or tag.
326 fileid can be a file revision or node."""
337 fileid can be a file revision or node."""
327 return context.filectx(self, path, changeid, fileid)
338 return context.filectx(self, path, changeid, fileid)
328
339
329 def getcwd(self):
340 def getcwd(self):
330 return self.dirstate.getcwd()
341 return self.dirstate.getcwd()
331
342
332 def wfile(self, f, mode='r'):
343 def wfile(self, f, mode='r'):
333 return self.wopener(f, mode)
344 return self.wopener(f, mode)
334
345
335 def wread(self, filename):
346 def wread(self, filename):
336 if self.encodepats == None:
347 if self.encodepats == None:
337 l = []
348 l = []
338 for pat, cmd in self.ui.configitems("encode"):
349 for pat, cmd in self.ui.configitems("encode"):
339 mf = util.matcher(self.root, "", [pat], [], [])[1]
350 mf = util.matcher(self.root, "", [pat], [], [])[1]
340 l.append((mf, cmd))
351 l.append((mf, cmd))
341 self.encodepats = l
352 self.encodepats = l
342
353
343 data = self.wopener(filename, 'r').read()
354 data = self.wopener(filename, 'r').read()
344
355
345 for mf, cmd in self.encodepats:
356 for mf, cmd in self.encodepats:
346 if mf(filename):
357 if mf(filename):
347 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
358 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
348 data = util.filter(data, cmd)
359 data = util.filter(data, cmd)
349 break
360 break
350
361
351 return data
362 return data
352
363
353 def wwrite(self, filename, data, fd=None):
364 def wwrite(self, filename, data, fd=None):
354 if self.decodepats == None:
365 if self.decodepats == None:
355 l = []
366 l = []
356 for pat, cmd in self.ui.configitems("decode"):
367 for pat, cmd in self.ui.configitems("decode"):
357 mf = util.matcher(self.root, "", [pat], [], [])[1]
368 mf = util.matcher(self.root, "", [pat], [], [])[1]
358 l.append((mf, cmd))
369 l.append((mf, cmd))
359 self.decodepats = l
370 self.decodepats = l
360
371
361 for mf, cmd in self.decodepats:
372 for mf, cmd in self.decodepats:
362 if mf(filename):
373 if mf(filename):
363 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
374 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
364 data = util.filter(data, cmd)
375 data = util.filter(data, cmd)
365 break
376 break
366
377
367 if fd:
378 if fd:
368 return fd.write(data)
379 return fd.write(data)
369 return self.wopener(filename, 'w').write(data)
380 return self.wopener(filename, 'w').write(data)
370
381
371 def transaction(self):
382 def transaction(self):
372 tr = self.transhandle
383 tr = self.transhandle
373 if tr != None and tr.running():
384 if tr != None and tr.running():
374 return tr.nest()
385 return tr.nest()
375
386
376 # save dirstate for rollback
387 # save dirstate for rollback
377 try:
388 try:
378 ds = self.opener("dirstate").read()
389 ds = self.opener("dirstate").read()
379 except IOError:
390 except IOError:
380 ds = ""
391 ds = ""
381 self.opener("journal.dirstate", "w").write(ds)
392 self.opener("journal.dirstate", "w").write(ds)
382
393
383 tr = transaction.transaction(self.ui.warn, self.opener,
394 tr = transaction.transaction(self.ui.warn, self.opener,
384 self.join("journal"),
395 self.join("journal"),
385 aftertrans(self.path))
396 aftertrans(self.path))
386 self.transhandle = tr
397 self.transhandle = tr
387 return tr
398 return tr
388
399
389 def recover(self):
400 def recover(self):
390 l = self.lock()
401 l = self.lock()
391 if os.path.exists(self.join("journal")):
402 if os.path.exists(self.join("journal")):
392 self.ui.status(_("rolling back interrupted transaction\n"))
403 self.ui.status(_("rolling back interrupted transaction\n"))
393 transaction.rollback(self.opener, self.join("journal"))
404 transaction.rollback(self.opener, self.join("journal"))
394 self.reload()
405 self.reload()
395 return True
406 return True
396 else:
407 else:
397 self.ui.warn(_("no interrupted transaction available\n"))
408 self.ui.warn(_("no interrupted transaction available\n"))
398 return False
409 return False
399
410
400 def rollback(self, wlock=None):
411 def rollback(self, wlock=None):
401 if not wlock:
412 if not wlock:
402 wlock = self.wlock()
413 wlock = self.wlock()
403 l = self.lock()
414 l = self.lock()
404 if os.path.exists(self.join("undo")):
415 if os.path.exists(self.join("undo")):
405 self.ui.status(_("rolling back last transaction\n"))
416 self.ui.status(_("rolling back last transaction\n"))
406 transaction.rollback(self.opener, self.join("undo"))
417 transaction.rollback(self.opener, self.join("undo"))
407 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
418 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
408 self.reload()
419 self.reload()
409 self.wreload()
420 self.wreload()
410 else:
421 else:
411 self.ui.warn(_("no rollback information available\n"))
422 self.ui.warn(_("no rollback information available\n"))
412
423
413 def wreload(self):
424 def wreload(self):
414 self.dirstate.read()
425 self.dirstate.read()
415
426
416 def reload(self):
427 def reload(self):
417 self.changelog.load()
428 self.changelog.load()
418 self.manifest.load()
429 self.manifest.load()
419 self.tagscache = None
430 self.tagscache = None
420 self.nodetagscache = None
431 self.nodetagscache = None
421
432
422 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
433 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
423 desc=None):
434 desc=None):
424 try:
435 try:
425 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
436 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
426 except lock.LockHeld, inst:
437 except lock.LockHeld, inst:
427 if not wait:
438 if not wait:
428 raise
439 raise
429 self.ui.warn(_("waiting for lock on %s held by %s\n") %
440 self.ui.warn(_("waiting for lock on %s held by %s\n") %
430 (desc, inst.args[0]))
441 (desc, inst.args[0]))
431 # default to 600 seconds timeout
442 # default to 600 seconds timeout
432 l = lock.lock(self.join(lockname),
443 l = lock.lock(self.join(lockname),
433 int(self.ui.config("ui", "timeout") or 600),
444 int(self.ui.config("ui", "timeout") or 600),
434 releasefn, desc=desc)
445 releasefn, desc=desc)
435 if acquirefn:
446 if acquirefn:
436 acquirefn()
447 acquirefn()
437 return l
448 return l
438
449
439 def lock(self, wait=1):
450 def lock(self, wait=1):
440 return self.do_lock("lock", wait, acquirefn=self.reload,
451 return self.do_lock("lock", wait, acquirefn=self.reload,
441 desc=_('repository %s') % self.origroot)
452 desc=_('repository %s') % self.origroot)
442
453
443 def wlock(self, wait=1):
454 def wlock(self, wait=1):
444 return self.do_lock("wlock", wait, self.dirstate.write,
455 return self.do_lock("wlock", wait, self.dirstate.write,
445 self.wreload,
456 self.wreload,
446 desc=_('working directory of %s') % self.origroot)
457 desc=_('working directory of %s') % self.origroot)
447
458
448 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
459 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
449 "determine whether a new filenode is needed"
460 "determine whether a new filenode is needed"
450 fp1 = manifest1.get(filename, nullid)
461 fp1 = manifest1.get(filename, nullid)
451 fp2 = manifest2.get(filename, nullid)
462 fp2 = manifest2.get(filename, nullid)
452
463
453 if fp2 != nullid:
464 if fp2 != nullid:
454 # is one parent an ancestor of the other?
465 # is one parent an ancestor of the other?
455 fpa = filelog.ancestor(fp1, fp2)
466 fpa = filelog.ancestor(fp1, fp2)
456 if fpa == fp1:
467 if fpa == fp1:
457 fp1, fp2 = fp2, nullid
468 fp1, fp2 = fp2, nullid
458 elif fpa == fp2:
469 elif fpa == fp2:
459 fp2 = nullid
470 fp2 = nullid
460
471
461 # is the file unmodified from the parent? report existing entry
472 # is the file unmodified from the parent? report existing entry
462 if fp2 == nullid and text == filelog.read(fp1):
473 if fp2 == nullid and text == filelog.read(fp1):
463 return (fp1, None, None)
474 return (fp1, None, None)
464
475
465 return (None, fp1, fp2)
476 return (None, fp1, fp2)
466
477
467 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
478 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
468 orig_parent = self.dirstate.parents()[0] or nullid
479 orig_parent = self.dirstate.parents()[0] or nullid
469 p1 = p1 or self.dirstate.parents()[0] or nullid
480 p1 = p1 or self.dirstate.parents()[0] or nullid
470 p2 = p2 or self.dirstate.parents()[1] or nullid
481 p2 = p2 or self.dirstate.parents()[1] or nullid
471 c1 = self.changelog.read(p1)
482 c1 = self.changelog.read(p1)
472 c2 = self.changelog.read(p2)
483 c2 = self.changelog.read(p2)
473 m1 = self.manifest.read(c1[0]).copy()
484 m1 = self.manifest.read(c1[0]).copy()
474 m2 = self.manifest.read(c2[0])
485 m2 = self.manifest.read(c2[0])
475 changed = []
486 changed = []
476
487
477 if orig_parent == p1:
488 if orig_parent == p1:
478 update_dirstate = 1
489 update_dirstate = 1
479 else:
490 else:
480 update_dirstate = 0
491 update_dirstate = 0
481
492
482 if not wlock:
493 if not wlock:
483 wlock = self.wlock()
494 wlock = self.wlock()
484 l = self.lock()
495 l = self.lock()
485 tr = self.transaction()
496 tr = self.transaction()
486 linkrev = self.changelog.count()
497 linkrev = self.changelog.count()
487 for f in files:
498 for f in files:
488 try:
499 try:
489 t = self.wread(f)
500 t = self.wread(f)
490 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
501 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
491 r = self.file(f)
502 r = self.file(f)
492
503
493 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
504 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
494 if entry:
505 if entry:
495 m1[f] = entry
506 m1[f] = entry
496 continue
507 continue
497
508
498 m1[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
509 m1[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
499 changed.append(f)
510 changed.append(f)
500 if update_dirstate:
511 if update_dirstate:
501 self.dirstate.update([f], "n")
512 self.dirstate.update([f], "n")
502 except IOError:
513 except IOError:
503 try:
514 try:
504 del m1[f]
515 del m1[f]
505 if update_dirstate:
516 if update_dirstate:
506 self.dirstate.forget([f])
517 self.dirstate.forget([f])
507 except:
518 except:
508 # deleted from p2?
519 # deleted from p2?
509 pass
520 pass
510
521
511 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
522 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
512 user = user or self.ui.username()
523 user = user or self.ui.username()
513 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
524 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
514 tr.close()
525 tr.close()
515 if update_dirstate:
526 if update_dirstate:
516 self.dirstate.setparents(n, nullid)
527 self.dirstate.setparents(n, nullid)
517
528
518 def commit(self, files=None, text="", user=None, date=None,
529 def commit(self, files=None, text="", user=None, date=None,
519 match=util.always, force=False, lock=None, wlock=None,
530 match=util.always, force=False, lock=None, wlock=None,
520 force_editor=False):
531 force_editor=False):
521 commit = []
532 commit = []
522 remove = []
533 remove = []
523 changed = []
534 changed = []
524
535
525 if files:
536 if files:
526 for f in files:
537 for f in files:
527 s = self.dirstate.state(f)
538 s = self.dirstate.state(f)
528 if s in 'nmai':
539 if s in 'nmai':
529 commit.append(f)
540 commit.append(f)
530 elif s == 'r':
541 elif s == 'r':
531 remove.append(f)
542 remove.append(f)
532 else:
543 else:
533 self.ui.warn(_("%s not tracked!\n") % f)
544 self.ui.warn(_("%s not tracked!\n") % f)
534 else:
545 else:
535 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
546 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
536 commit = modified + added
547 commit = modified + added
537 remove = removed
548 remove = removed
538
549
539 p1, p2 = self.dirstate.parents()
550 p1, p2 = self.dirstate.parents()
540 c1 = self.changelog.read(p1)
551 c1 = self.changelog.read(p1)
541 c2 = self.changelog.read(p2)
552 c2 = self.changelog.read(p2)
542 m1 = self.manifest.read(c1[0]).copy()
553 m1 = self.manifest.read(c1[0]).copy()
543 m2 = self.manifest.read(c2[0])
554 m2 = self.manifest.read(c2[0])
544
555
545 if not commit and not remove and not force and p2 == nullid:
556 if not commit and not remove and not force and p2 == nullid:
546 self.ui.status(_("nothing changed\n"))
557 self.ui.status(_("nothing changed\n"))
547 return None
558 return None
548
559
549 xp1 = hex(p1)
560 xp1 = hex(p1)
550 if p2 == nullid: xp2 = ''
561 if p2 == nullid: xp2 = ''
551 else: xp2 = hex(p2)
562 else: xp2 = hex(p2)
552
563
553 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
564 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
554
565
555 if not wlock:
566 if not wlock:
556 wlock = self.wlock()
567 wlock = self.wlock()
557 if not lock:
568 if not lock:
558 lock = self.lock()
569 lock = self.lock()
559 tr = self.transaction()
570 tr = self.transaction()
560
571
561 # check in files
572 # check in files
562 new = {}
573 new = {}
563 linkrev = self.changelog.count()
574 linkrev = self.changelog.count()
564 commit.sort()
575 commit.sort()
565 for f in commit:
576 for f in commit:
566 self.ui.note(f + "\n")
577 self.ui.note(f + "\n")
567 try:
578 try:
568 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
579 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
569 t = self.wread(f)
580 t = self.wread(f)
570 except IOError:
581 except IOError:
571 self.ui.warn(_("trouble committing %s!\n") % f)
582 self.ui.warn(_("trouble committing %s!\n") % f)
572 raise
583 raise
573
584
574 r = self.file(f)
585 r = self.file(f)
575
586
576 meta = {}
587 meta = {}
577 cp = self.dirstate.copied(f)
588 cp = self.dirstate.copied(f)
578 if cp:
589 if cp:
579 meta["copy"] = cp
590 meta["copy"] = cp
580 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
591 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
581 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
592 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
582 fp1, fp2 = nullid, nullid
593 fp1, fp2 = nullid, nullid
583 else:
594 else:
584 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
595 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
585 if entry:
596 if entry:
586 new[f] = entry
597 new[f] = entry
587 continue
598 continue
588
599
589 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
600 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
590 # remember what we've added so that we can later calculate
601 # remember what we've added so that we can later calculate
591 # the files to pull from a set of changesets
602 # the files to pull from a set of changesets
592 changed.append(f)
603 changed.append(f)
593
604
594 # update manifest
605 # update manifest
595 m1.update(new)
606 m1.update(new)
596 for f in remove:
607 for f in remove:
597 if f in m1:
608 if f in m1:
598 del m1[f]
609 del m1[f]
599 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0],
610 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0],
600 (new, remove))
611 (new, remove))
601
612
602 # add changeset
613 # add changeset
603 new = new.keys()
614 new = new.keys()
604 new.sort()
615 new.sort()
605
616
606 user = user or self.ui.username()
617 user = user or self.ui.username()
607 if not text or force_editor:
618 if not text or force_editor:
608 edittext = []
619 edittext = []
609 if text:
620 if text:
610 edittext.append(text)
621 edittext.append(text)
611 edittext.append("")
622 edittext.append("")
612 if p2 != nullid:
623 if p2 != nullid:
613 edittext.append("HG: branch merge")
624 edittext.append("HG: branch merge")
614 edittext.extend(["HG: changed %s" % f for f in changed])
625 edittext.extend(["HG: changed %s" % f for f in changed])
615 edittext.extend(["HG: removed %s" % f for f in remove])
626 edittext.extend(["HG: removed %s" % f for f in remove])
616 if not changed and not remove:
627 if not changed and not remove:
617 edittext.append("HG: no files changed")
628 edittext.append("HG: no files changed")
618 edittext.append("")
629 edittext.append("")
619 # run editor in the repository root
630 # run editor in the repository root
620 olddir = os.getcwd()
631 olddir = os.getcwd()
621 os.chdir(self.root)
632 os.chdir(self.root)
622 text = self.ui.edit("\n".join(edittext), user)
633 text = self.ui.edit("\n".join(edittext), user)
623 os.chdir(olddir)
634 os.chdir(olddir)
624
635
625 lines = [line.rstrip() for line in text.rstrip().splitlines()]
636 lines = [line.rstrip() for line in text.rstrip().splitlines()]
626 while lines and not lines[0]:
637 while lines and not lines[0]:
627 del lines[0]
638 del lines[0]
628 if not lines:
639 if not lines:
629 return None
640 return None
630 text = '\n'.join(lines)
641 text = '\n'.join(lines)
631 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
642 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
632 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
643 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
633 parent2=xp2)
644 parent2=xp2)
634 tr.close()
645 tr.close()
635
646
636 self.dirstate.setparents(n)
647 self.dirstate.setparents(n)
637 self.dirstate.update(new, "n")
648 self.dirstate.update(new, "n")
638 self.dirstate.forget(remove)
649 self.dirstate.forget(remove)
639
650
640 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
651 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
641 return n
652 return n
642
653
643 def walk(self, node=None, files=[], match=util.always, badmatch=None):
654 def walk(self, node=None, files=[], match=util.always, badmatch=None):
644 if node:
655 if node:
645 fdict = dict.fromkeys(files)
656 fdict = dict.fromkeys(files)
646 for fn in self.manifest.read(self.changelog.read(node)[0]):
657 for fn in self.manifest.read(self.changelog.read(node)[0]):
647 for ffn in fdict:
658 for ffn in fdict:
648 # match if the file is the exact name or a directory
659 # match if the file is the exact name or a directory
649 if ffn == fn or fn.startswith("%s/" % ffn):
660 if ffn == fn or fn.startswith("%s/" % ffn):
650 del fdict[ffn]
661 del fdict[ffn]
651 break
662 break
652 if match(fn):
663 if match(fn):
653 yield 'm', fn
664 yield 'm', fn
654 for fn in fdict:
665 for fn in fdict:
655 if badmatch and badmatch(fn):
666 if badmatch and badmatch(fn):
656 if match(fn):
667 if match(fn):
657 yield 'b', fn
668 yield 'b', fn
658 else:
669 else:
659 self.ui.warn(_('%s: No such file in rev %s\n') % (
670 self.ui.warn(_('%s: No such file in rev %s\n') % (
660 util.pathto(self.getcwd(), fn), short(node)))
671 util.pathto(self.getcwd(), fn), short(node)))
661 else:
672 else:
662 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
673 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
663 yield src, fn
674 yield src, fn
664
675
665 def status(self, node1=None, node2=None, files=[], match=util.always,
676 def status(self, node1=None, node2=None, files=[], match=util.always,
666 wlock=None, list_ignored=False, list_clean=False):
677 wlock=None, list_ignored=False, list_clean=False):
667 """return status of files between two nodes or node and working directory
678 """return status of files between two nodes or node and working directory
668
679
669 If node1 is None, use the first dirstate parent instead.
680 If node1 is None, use the first dirstate parent instead.
670 If node2 is None, compare node1 with working directory.
681 If node2 is None, compare node1 with working directory.
671 """
682 """
672
683
673 def fcmp(fn, mf):
684 def fcmp(fn, mf):
674 t1 = self.wread(fn)
685 t1 = self.wread(fn)
675 return self.file(fn).cmp(mf.get(fn, nullid), t1)
686 return self.file(fn).cmp(mf.get(fn, nullid), t1)
676
687
677 def mfmatches(node):
688 def mfmatches(node):
678 change = self.changelog.read(node)
689 change = self.changelog.read(node)
679 mf = dict(self.manifest.read(change[0]))
690 mf = dict(self.manifest.read(change[0]))
680 for fn in mf.keys():
691 for fn in mf.keys():
681 if not match(fn):
692 if not match(fn):
682 del mf[fn]
693 del mf[fn]
683 return mf
694 return mf
684
695
685 modified, added, removed, deleted, unknown = [], [], [], [], []
696 modified, added, removed, deleted, unknown = [], [], [], [], []
686 ignored, clean = [], []
697 ignored, clean = [], []
687
698
688 compareworking = False
699 compareworking = False
689 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
700 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
690 compareworking = True
701 compareworking = True
691
702
692 if not compareworking:
703 if not compareworking:
693 # read the manifest from node1 before the manifest from node2,
704 # read the manifest from node1 before the manifest from node2,
694 # so that we'll hit the manifest cache if we're going through
705 # so that we'll hit the manifest cache if we're going through
695 # all the revisions in parent->child order.
706 # all the revisions in parent->child order.
696 mf1 = mfmatches(node1)
707 mf1 = mfmatches(node1)
697
708
698 # are we comparing the working directory?
709 # are we comparing the working directory?
699 if not node2:
710 if not node2:
700 if not wlock:
711 if not wlock:
701 try:
712 try:
702 wlock = self.wlock(wait=0)
713 wlock = self.wlock(wait=0)
703 except lock.LockException:
714 except lock.LockException:
704 wlock = None
715 wlock = None
705 (lookup, modified, added, removed, deleted, unknown,
716 (lookup, modified, added, removed, deleted, unknown,
706 ignored, clean) = self.dirstate.status(files, match,
717 ignored, clean) = self.dirstate.status(files, match,
707 list_ignored, list_clean)
718 list_ignored, list_clean)
708
719
709 # are we comparing working dir against its parent?
720 # are we comparing working dir against its parent?
710 if compareworking:
721 if compareworking:
711 if lookup:
722 if lookup:
712 # do a full compare of any files that might have changed
723 # do a full compare of any files that might have changed
713 mf2 = mfmatches(self.dirstate.parents()[0])
724 mf2 = mfmatches(self.dirstate.parents()[0])
714 for f in lookup:
725 for f in lookup:
715 if fcmp(f, mf2):
726 if fcmp(f, mf2):
716 modified.append(f)
727 modified.append(f)
717 else:
728 else:
718 clean.append(f)
729 clean.append(f)
719 if wlock is not None:
730 if wlock is not None:
720 self.dirstate.update([f], "n")
731 self.dirstate.update([f], "n")
721 else:
732 else:
722 # we are comparing working dir against non-parent
733 # we are comparing working dir against non-parent
723 # generate a pseudo-manifest for the working dir
734 # generate a pseudo-manifest for the working dir
724 mf2 = mfmatches(self.dirstate.parents()[0])
735 mf2 = mfmatches(self.dirstate.parents()[0])
725 for f in lookup + modified + added:
736 for f in lookup + modified + added:
726 mf2[f] = ""
737 mf2[f] = ""
727 for f in removed:
738 for f in removed:
728 if f in mf2:
739 if f in mf2:
729 del mf2[f]
740 del mf2[f]
730 else:
741 else:
731 # we are comparing two revisions
742 # we are comparing two revisions
732 mf2 = mfmatches(node2)
743 mf2 = mfmatches(node2)
733
744
734 if not compareworking:
745 if not compareworking:
735 # flush lists from dirstate before comparing manifests
746 # flush lists from dirstate before comparing manifests
736 modified, added, clean = [], [], []
747 modified, added, clean = [], [], []
737
748
738 # make sure to sort the files so we talk to the disk in a
749 # make sure to sort the files so we talk to the disk in a
739 # reasonable order
750 # reasonable order
740 mf2keys = mf2.keys()
751 mf2keys = mf2.keys()
741 mf2keys.sort()
752 mf2keys.sort()
742 for fn in mf2keys:
753 for fn in mf2keys:
743 if mf1.has_key(fn):
754 if mf1.has_key(fn):
744 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
755 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
745 modified.append(fn)
756 modified.append(fn)
746 elif list_clean:
757 elif list_clean:
747 clean.append(fn)
758 clean.append(fn)
748 del mf1[fn]
759 del mf1[fn]
749 else:
760 else:
750 added.append(fn)
761 added.append(fn)
751
762
752 removed = mf1.keys()
763 removed = mf1.keys()
753
764
754 # sort and return results:
765 # sort and return results:
755 for l in modified, added, removed, deleted, unknown, ignored, clean:
766 for l in modified, added, removed, deleted, unknown, ignored, clean:
756 l.sort()
767 l.sort()
757 return (modified, added, removed, deleted, unknown, ignored, clean)
768 return (modified, added, removed, deleted, unknown, ignored, clean)
758
769
759 def add(self, list, wlock=None):
770 def add(self, list, wlock=None):
760 if not wlock:
771 if not wlock:
761 wlock = self.wlock()
772 wlock = self.wlock()
762 for f in list:
773 for f in list:
763 p = self.wjoin(f)
774 p = self.wjoin(f)
764 if not os.path.exists(p):
775 if not os.path.exists(p):
765 self.ui.warn(_("%s does not exist!\n") % f)
776 self.ui.warn(_("%s does not exist!\n") % f)
766 elif not os.path.isfile(p):
777 elif not os.path.isfile(p):
767 self.ui.warn(_("%s not added: only files supported currently\n")
778 self.ui.warn(_("%s not added: only files supported currently\n")
768 % f)
779 % f)
769 elif self.dirstate.state(f) in 'an':
780 elif self.dirstate.state(f) in 'an':
770 self.ui.warn(_("%s already tracked!\n") % f)
781 self.ui.warn(_("%s already tracked!\n") % f)
771 else:
782 else:
772 self.dirstate.update([f], "a")
783 self.dirstate.update([f], "a")
773
784
774 def forget(self, list, wlock=None):
785 def forget(self, list, wlock=None):
775 if not wlock:
786 if not wlock:
776 wlock = self.wlock()
787 wlock = self.wlock()
777 for f in list:
788 for f in list:
778 if self.dirstate.state(f) not in 'ai':
789 if self.dirstate.state(f) not in 'ai':
779 self.ui.warn(_("%s not added!\n") % f)
790 self.ui.warn(_("%s not added!\n") % f)
780 else:
791 else:
781 self.dirstate.forget([f])
792 self.dirstate.forget([f])
782
793
783 def remove(self, list, unlink=False, wlock=None):
794 def remove(self, list, unlink=False, wlock=None):
784 if unlink:
795 if unlink:
785 for f in list:
796 for f in list:
786 try:
797 try:
787 util.unlink(self.wjoin(f))
798 util.unlink(self.wjoin(f))
788 except OSError, inst:
799 except OSError, inst:
789 if inst.errno != errno.ENOENT:
800 if inst.errno != errno.ENOENT:
790 raise
801 raise
791 if not wlock:
802 if not wlock:
792 wlock = self.wlock()
803 wlock = self.wlock()
793 for f in list:
804 for f in list:
794 p = self.wjoin(f)
805 p = self.wjoin(f)
795 if os.path.exists(p):
806 if os.path.exists(p):
796 self.ui.warn(_("%s still exists!\n") % f)
807 self.ui.warn(_("%s still exists!\n") % f)
797 elif self.dirstate.state(f) == 'a':
808 elif self.dirstate.state(f) == 'a':
798 self.dirstate.forget([f])
809 self.dirstate.forget([f])
799 elif f not in self.dirstate:
810 elif f not in self.dirstate:
800 self.ui.warn(_("%s not tracked!\n") % f)
811 self.ui.warn(_("%s not tracked!\n") % f)
801 else:
812 else:
802 self.dirstate.update([f], "r")
813 self.dirstate.update([f], "r")
803
814
804 def undelete(self, list, wlock=None):
815 def undelete(self, list, wlock=None):
805 p = self.dirstate.parents()[0]
816 p = self.dirstate.parents()[0]
806 mn = self.changelog.read(p)[0]
817 mn = self.changelog.read(p)[0]
807 m = self.manifest.read(mn)
818 m = self.manifest.read(mn)
808 if not wlock:
819 if not wlock:
809 wlock = self.wlock()
820 wlock = self.wlock()
810 for f in list:
821 for f in list:
811 if self.dirstate.state(f) not in "r":
822 if self.dirstate.state(f) not in "r":
812 self.ui.warn("%s not removed!\n" % f)
823 self.ui.warn("%s not removed!\n" % f)
813 else:
824 else:
814 t = self.file(f).read(m[f])
825 t = self.file(f).read(m[f])
815 self.wwrite(f, t)
826 self.wwrite(f, t)
816 util.set_exec(self.wjoin(f), m.execf(f))
827 util.set_exec(self.wjoin(f), m.execf(f))
817 self.dirstate.update([f], "n")
828 self.dirstate.update([f], "n")
818
829
819 def copy(self, source, dest, wlock=None):
830 def copy(self, source, dest, wlock=None):
820 p = self.wjoin(dest)
831 p = self.wjoin(dest)
821 if not os.path.exists(p):
832 if not os.path.exists(p):
822 self.ui.warn(_("%s does not exist!\n") % dest)
833 self.ui.warn(_("%s does not exist!\n") % dest)
823 elif not os.path.isfile(p):
834 elif not os.path.isfile(p):
824 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
835 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
825 else:
836 else:
826 if not wlock:
837 if not wlock:
827 wlock = self.wlock()
838 wlock = self.wlock()
828 if self.dirstate.state(dest) == '?':
839 if self.dirstate.state(dest) == '?':
829 self.dirstate.update([dest], "a")
840 self.dirstate.update([dest], "a")
830 self.dirstate.copy(source, dest)
841 self.dirstate.copy(source, dest)
831
842
832 def heads(self, start=None):
843 def heads(self, start=None):
833 heads = self.changelog.heads(start)
844 heads = self.changelog.heads(start)
834 # sort the output in rev descending order
845 # sort the output in rev descending order
835 heads = [(-self.changelog.rev(h), h) for h in heads]
846 heads = [(-self.changelog.rev(h), h) for h in heads]
836 heads.sort()
847 heads.sort()
837 return [n for (r, n) in heads]
848 return [n for (r, n) in heads]
838
849
839 # branchlookup returns a dict giving a list of branches for
850 # branchlookup returns a dict giving a list of branches for
840 # each head. A branch is defined as the tag of a node or
851 # each head. A branch is defined as the tag of a node or
841 # the branch of the node's parents. If a node has multiple
852 # the branch of the node's parents. If a node has multiple
842 # branch tags, tags are eliminated if they are visible from other
853 # branch tags, tags are eliminated if they are visible from other
843 # branch tags.
854 # branch tags.
844 #
855 #
845 # So, for this graph: a->b->c->d->e
856 # So, for this graph: a->b->c->d->e
846 # \ /
857 # \ /
847 # aa -----/
858 # aa -----/
848 # a has tag 2.6.12
859 # a has tag 2.6.12
849 # d has tag 2.6.13
860 # d has tag 2.6.13
850 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
861 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
851 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
862 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
852 # from the list.
863 # from the list.
853 #
864 #
854 # It is possible that more than one head will have the same branch tag.
865 # It is possible that more than one head will have the same branch tag.
855 # callers need to check the result for multiple heads under the same
866 # callers need to check the result for multiple heads under the same
856 # branch tag if that is a problem for them (ie checkout of a specific
867 # branch tag if that is a problem for them (ie checkout of a specific
857 # branch).
868 # branch).
858 #
869 #
859 # passing in a specific branch will limit the depth of the search
870 # passing in a specific branch will limit the depth of the search
860 # through the parents. It won't limit the branches returned in the
871 # through the parents. It won't limit the branches returned in the
861 # result though.
872 # result though.
862 def branchlookup(self, heads=None, branch=None):
873 def branchlookup(self, heads=None, branch=None):
863 if not heads:
874 if not heads:
864 heads = self.heads()
875 heads = self.heads()
865 headt = [ h for h in heads ]
876 headt = [ h for h in heads ]
866 chlog = self.changelog
877 chlog = self.changelog
867 branches = {}
878 branches = {}
868 merges = []
879 merges = []
869 seenmerge = {}
880 seenmerge = {}
870
881
871 # traverse the tree once for each head, recording in the branches
882 # traverse the tree once for each head, recording in the branches
872 # dict which tags are visible from this head. The branches
883 # dict which tags are visible from this head. The branches
873 # dict also records which tags are visible from each tag
884 # dict also records which tags are visible from each tag
874 # while we traverse.
885 # while we traverse.
875 while headt or merges:
886 while headt or merges:
876 if merges:
887 if merges:
877 n, found = merges.pop()
888 n, found = merges.pop()
878 visit = [n]
889 visit = [n]
879 else:
890 else:
880 h = headt.pop()
891 h = headt.pop()
881 visit = [h]
892 visit = [h]
882 found = [h]
893 found = [h]
883 seen = {}
894 seen = {}
884 while visit:
895 while visit:
885 n = visit.pop()
896 n = visit.pop()
886 if n in seen:
897 if n in seen:
887 continue
898 continue
888 pp = chlog.parents(n)
899 pp = chlog.parents(n)
889 tags = self.nodetags(n)
900 tags = self.nodetags(n)
890 if tags:
901 if tags:
891 for x in tags:
902 for x in tags:
892 if x == 'tip':
903 if x == 'tip':
893 continue
904 continue
894 for f in found:
905 for f in found:
895 branches.setdefault(f, {})[n] = 1
906 branches.setdefault(f, {})[n] = 1
896 branches.setdefault(n, {})[n] = 1
907 branches.setdefault(n, {})[n] = 1
897 break
908 break
898 if n not in found:
909 if n not in found:
899 found.append(n)
910 found.append(n)
900 if branch in tags:
911 if branch in tags:
901 continue
912 continue
902 seen[n] = 1
913 seen[n] = 1
903 if pp[1] != nullid and n not in seenmerge:
914 if pp[1] != nullid and n not in seenmerge:
904 merges.append((pp[1], [x for x in found]))
915 merges.append((pp[1], [x for x in found]))
905 seenmerge[n] = 1
916 seenmerge[n] = 1
906 if pp[0] != nullid:
917 if pp[0] != nullid:
907 visit.append(pp[0])
918 visit.append(pp[0])
908 # traverse the branches dict, eliminating branch tags from each
919 # traverse the branches dict, eliminating branch tags from each
909 # head that are visible from another branch tag for that head.
920 # head that are visible from another branch tag for that head.
910 out = {}
921 out = {}
911 viscache = {}
922 viscache = {}
912 for h in heads:
923 for h in heads:
913 def visible(node):
924 def visible(node):
914 if node in viscache:
925 if node in viscache:
915 return viscache[node]
926 return viscache[node]
916 ret = {}
927 ret = {}
917 visit = [node]
928 visit = [node]
918 while visit:
929 while visit:
919 x = visit.pop()
930 x = visit.pop()
920 if x in viscache:
931 if x in viscache:
921 ret.update(viscache[x])
932 ret.update(viscache[x])
922 elif x not in ret:
933 elif x not in ret:
923 ret[x] = 1
934 ret[x] = 1
924 if x in branches:
935 if x in branches:
925 visit[len(visit):] = branches[x].keys()
936 visit[len(visit):] = branches[x].keys()
926 viscache[node] = ret
937 viscache[node] = ret
927 return ret
938 return ret
928 if h not in branches:
939 if h not in branches:
929 continue
940 continue
930 # O(n^2), but somewhat limited. This only searches the
941 # O(n^2), but somewhat limited. This only searches the
931 # tags visible from a specific head, not all the tags in the
942 # tags visible from a specific head, not all the tags in the
932 # whole repo.
943 # whole repo.
933 for b in branches[h]:
944 for b in branches[h]:
934 vis = False
945 vis = False
935 for bb in branches[h].keys():
946 for bb in branches[h].keys():
936 if b != bb:
947 if b != bb:
937 if b in visible(bb):
948 if b in visible(bb):
938 vis = True
949 vis = True
939 break
950 break
940 if not vis:
951 if not vis:
941 l = out.setdefault(h, [])
952 l = out.setdefault(h, [])
942 l[len(l):] = self.nodetags(b)
953 l[len(l):] = self.nodetags(b)
943 return out
954 return out
944
955
945 def branches(self, nodes):
956 def branches(self, nodes):
946 if not nodes:
957 if not nodes:
947 nodes = [self.changelog.tip()]
958 nodes = [self.changelog.tip()]
948 b = []
959 b = []
949 for n in nodes:
960 for n in nodes:
950 t = n
961 t = n
951 while 1:
962 while 1:
952 p = self.changelog.parents(n)
963 p = self.changelog.parents(n)
953 if p[1] != nullid or p[0] == nullid:
964 if p[1] != nullid or p[0] == nullid:
954 b.append((t, n, p[0], p[1]))
965 b.append((t, n, p[0], p[1]))
955 break
966 break
956 n = p[0]
967 n = p[0]
957 return b
968 return b
958
969
959 def between(self, pairs):
970 def between(self, pairs):
960 r = []
971 r = []
961
972
962 for top, bottom in pairs:
973 for top, bottom in pairs:
963 n, l, i = top, [], 0
974 n, l, i = top, [], 0
964 f = 1
975 f = 1
965
976
966 while n != bottom:
977 while n != bottom:
967 p = self.changelog.parents(n)[0]
978 p = self.changelog.parents(n)[0]
968 if i == f:
979 if i == f:
969 l.append(n)
980 l.append(n)
970 f = f * 2
981 f = f * 2
971 n = p
982 n = p
972 i += 1
983 i += 1
973
984
974 r.append(l)
985 r.append(l)
975
986
976 return r
987 return r
977
988
978 def findincoming(self, remote, base=None, heads=None, force=False):
989 def findincoming(self, remote, base=None, heads=None, force=False):
979 """Return list of roots of the subsets of missing nodes from remote
990 """Return list of roots of the subsets of missing nodes from remote
980
991
981 If base dict is specified, assume that these nodes and their parents
992 If base dict is specified, assume that these nodes and their parents
982 exist on the remote side and that no child of a node of base exists
993 exist on the remote side and that no child of a node of base exists
983 in both remote and self.
994 in both remote and self.
984 Furthermore base will be updated to include the nodes that exists
995 Furthermore base will be updated to include the nodes that exists
985 in self and remote but no children exists in self and remote.
996 in self and remote but no children exists in self and remote.
986 If a list of heads is specified, return only nodes which are heads
997 If a list of heads is specified, return only nodes which are heads
987 or ancestors of these heads.
998 or ancestors of these heads.
988
999
989 All the ancestors of base are in self and in remote.
1000 All the ancestors of base are in self and in remote.
990 All the descendants of the list returned are missing in self.
1001 All the descendants of the list returned are missing in self.
991 (and so we know that the rest of the nodes are missing in remote, see
1002 (and so we know that the rest of the nodes are missing in remote, see
992 outgoing)
1003 outgoing)
993 """
1004 """
994 m = self.changelog.nodemap
1005 m = self.changelog.nodemap
995 search = []
1006 search = []
996 fetch = {}
1007 fetch = {}
997 seen = {}
1008 seen = {}
998 seenbranch = {}
1009 seenbranch = {}
999 if base == None:
1010 if base == None:
1000 base = {}
1011 base = {}
1001
1012
1002 if not heads:
1013 if not heads:
1003 heads = remote.heads()
1014 heads = remote.heads()
1004
1015
1005 if self.changelog.tip() == nullid:
1016 if self.changelog.tip() == nullid:
1006 base[nullid] = 1
1017 base[nullid] = 1
1007 if heads != [nullid]:
1018 if heads != [nullid]:
1008 return [nullid]
1019 return [nullid]
1009 return []
1020 return []
1010
1021
1011 # assume we're closer to the tip than the root
1022 # assume we're closer to the tip than the root
1012 # and start by examining the heads
1023 # and start by examining the heads
1013 self.ui.status(_("searching for changes\n"))
1024 self.ui.status(_("searching for changes\n"))
1014
1025
1015 unknown = []
1026 unknown = []
1016 for h in heads:
1027 for h in heads:
1017 if h not in m:
1028 if h not in m:
1018 unknown.append(h)
1029 unknown.append(h)
1019 else:
1030 else:
1020 base[h] = 1
1031 base[h] = 1
1021
1032
1022 if not unknown:
1033 if not unknown:
1023 return []
1034 return []
1024
1035
1025 req = dict.fromkeys(unknown)
1036 req = dict.fromkeys(unknown)
1026 reqcnt = 0
1037 reqcnt = 0
1027
1038
1028 # search through remote branches
1039 # search through remote branches
1029 # a 'branch' here is a linear segment of history, with four parts:
1040 # a 'branch' here is a linear segment of history, with four parts:
1030 # head, root, first parent, second parent
1041 # head, root, first parent, second parent
1031 # (a branch always has two parents (or none) by definition)
1042 # (a branch always has two parents (or none) by definition)
1032 unknown = remote.branches(unknown)
1043 unknown = remote.branches(unknown)
1033 while unknown:
1044 while unknown:
1034 r = []
1045 r = []
1035 while unknown:
1046 while unknown:
1036 n = unknown.pop(0)
1047 n = unknown.pop(0)
1037 if n[0] in seen:
1048 if n[0] in seen:
1038 continue
1049 continue
1039
1050
1040 self.ui.debug(_("examining %s:%s\n")
1051 self.ui.debug(_("examining %s:%s\n")
1041 % (short(n[0]), short(n[1])))
1052 % (short(n[0]), short(n[1])))
1042 if n[0] == nullid: # found the end of the branch
1053 if n[0] == nullid: # found the end of the branch
1043 pass
1054 pass
1044 elif n in seenbranch:
1055 elif n in seenbranch:
1045 self.ui.debug(_("branch already found\n"))
1056 self.ui.debug(_("branch already found\n"))
1046 continue
1057 continue
1047 elif n[1] and n[1] in m: # do we know the base?
1058 elif n[1] and n[1] in m: # do we know the base?
1048 self.ui.debug(_("found incomplete branch %s:%s\n")
1059 self.ui.debug(_("found incomplete branch %s:%s\n")
1049 % (short(n[0]), short(n[1])))
1060 % (short(n[0]), short(n[1])))
1050 search.append(n) # schedule branch range for scanning
1061 search.append(n) # schedule branch range for scanning
1051 seenbranch[n] = 1
1062 seenbranch[n] = 1
1052 else:
1063 else:
1053 if n[1] not in seen and n[1] not in fetch:
1064 if n[1] not in seen and n[1] not in fetch:
1054 if n[2] in m and n[3] in m:
1065 if n[2] in m and n[3] in m:
1055 self.ui.debug(_("found new changeset %s\n") %
1066 self.ui.debug(_("found new changeset %s\n") %
1056 short(n[1]))
1067 short(n[1]))
1057 fetch[n[1]] = 1 # earliest unknown
1068 fetch[n[1]] = 1 # earliest unknown
1058 for p in n[2:4]:
1069 for p in n[2:4]:
1059 if p in m:
1070 if p in m:
1060 base[p] = 1 # latest known
1071 base[p] = 1 # latest known
1061
1072
1062 for p in n[2:4]:
1073 for p in n[2:4]:
1063 if p not in req and p not in m:
1074 if p not in req and p not in m:
1064 r.append(p)
1075 r.append(p)
1065 req[p] = 1
1076 req[p] = 1
1066 seen[n[0]] = 1
1077 seen[n[0]] = 1
1067
1078
1068 if r:
1079 if r:
1069 reqcnt += 1
1080 reqcnt += 1
1070 self.ui.debug(_("request %d: %s\n") %
1081 self.ui.debug(_("request %d: %s\n") %
1071 (reqcnt, " ".join(map(short, r))))
1082 (reqcnt, " ".join(map(short, r))))
1072 for p in range(0, len(r), 10):
1083 for p in range(0, len(r), 10):
1073 for b in remote.branches(r[p:p+10]):
1084 for b in remote.branches(r[p:p+10]):
1074 self.ui.debug(_("received %s:%s\n") %
1085 self.ui.debug(_("received %s:%s\n") %
1075 (short(b[0]), short(b[1])))
1086 (short(b[0]), short(b[1])))
1076 unknown.append(b)
1087 unknown.append(b)
1077
1088
1078 # do binary search on the branches we found
1089 # do binary search on the branches we found
1079 while search:
1090 while search:
1080 n = search.pop(0)
1091 n = search.pop(0)
1081 reqcnt += 1
1092 reqcnt += 1
1082 l = remote.between([(n[0], n[1])])[0]
1093 l = remote.between([(n[0], n[1])])[0]
1083 l.append(n[1])
1094 l.append(n[1])
1084 p = n[0]
1095 p = n[0]
1085 f = 1
1096 f = 1
1086 for i in l:
1097 for i in l:
1087 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1098 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1088 if i in m:
1099 if i in m:
1089 if f <= 2:
1100 if f <= 2:
1090 self.ui.debug(_("found new branch changeset %s\n") %
1101 self.ui.debug(_("found new branch changeset %s\n") %
1091 short(p))
1102 short(p))
1092 fetch[p] = 1
1103 fetch[p] = 1
1093 base[i] = 1
1104 base[i] = 1
1094 else:
1105 else:
1095 self.ui.debug(_("narrowed branch search to %s:%s\n")
1106 self.ui.debug(_("narrowed branch search to %s:%s\n")
1096 % (short(p), short(i)))
1107 % (short(p), short(i)))
1097 search.append((p, i))
1108 search.append((p, i))
1098 break
1109 break
1099 p, f = i, f * 2
1110 p, f = i, f * 2
1100
1111
1101 # sanity check our fetch list
1112 # sanity check our fetch list
1102 for f in fetch.keys():
1113 for f in fetch.keys():
1103 if f in m:
1114 if f in m:
1104 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1115 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1105
1116
1106 if base.keys() == [nullid]:
1117 if base.keys() == [nullid]:
1107 if force:
1118 if force:
1108 self.ui.warn(_("warning: repository is unrelated\n"))
1119 self.ui.warn(_("warning: repository is unrelated\n"))
1109 else:
1120 else:
1110 raise util.Abort(_("repository is unrelated"))
1121 raise util.Abort(_("repository is unrelated"))
1111
1122
1112 self.ui.debug(_("found new changesets starting at ") +
1123 self.ui.debug(_("found new changesets starting at ") +
1113 " ".join([short(f) for f in fetch]) + "\n")
1124 " ".join([short(f) for f in fetch]) + "\n")
1114
1125
1115 self.ui.debug(_("%d total queries\n") % reqcnt)
1126 self.ui.debug(_("%d total queries\n") % reqcnt)
1116
1127
1117 return fetch.keys()
1128 return fetch.keys()
1118
1129
1119 def findoutgoing(self, remote, base=None, heads=None, force=False):
1130 def findoutgoing(self, remote, base=None, heads=None, force=False):
1120 """Return list of nodes that are roots of subsets not in remote
1131 """Return list of nodes that are roots of subsets not in remote
1121
1132
1122 If base dict is specified, assume that these nodes and their parents
1133 If base dict is specified, assume that these nodes and their parents
1123 exist on the remote side.
1134 exist on the remote side.
1124 If a list of heads is specified, return only nodes which are heads
1135 If a list of heads is specified, return only nodes which are heads
1125 or ancestors of these heads, and return a second element which
1136 or ancestors of these heads, and return a second element which
1126 contains all remote heads which get new children.
1137 contains all remote heads which get new children.
1127 """
1138 """
1128 if base == None:
1139 if base == None:
1129 base = {}
1140 base = {}
1130 self.findincoming(remote, base, heads, force=force)
1141 self.findincoming(remote, base, heads, force=force)
1131
1142
1132 self.ui.debug(_("common changesets up to ")
1143 self.ui.debug(_("common changesets up to ")
1133 + " ".join(map(short, base.keys())) + "\n")
1144 + " ".join(map(short, base.keys())) + "\n")
1134
1145
1135 remain = dict.fromkeys(self.changelog.nodemap)
1146 remain = dict.fromkeys(self.changelog.nodemap)
1136
1147
1137 # prune everything remote has from the tree
1148 # prune everything remote has from the tree
1138 del remain[nullid]
1149 del remain[nullid]
1139 remove = base.keys()
1150 remove = base.keys()
1140 while remove:
1151 while remove:
1141 n = remove.pop(0)
1152 n = remove.pop(0)
1142 if n in remain:
1153 if n in remain:
1143 del remain[n]
1154 del remain[n]
1144 for p in self.changelog.parents(n):
1155 for p in self.changelog.parents(n):
1145 remove.append(p)
1156 remove.append(p)
1146
1157
1147 # find every node whose parents have been pruned
1158 # find every node whose parents have been pruned
1148 subset = []
1159 subset = []
1149 # find every remote head that will get new children
1160 # find every remote head that will get new children
1150 updated_heads = {}
1161 updated_heads = {}
1151 for n in remain:
1162 for n in remain:
1152 p1, p2 = self.changelog.parents(n)
1163 p1, p2 = self.changelog.parents(n)
1153 if p1 not in remain and p2 not in remain:
1164 if p1 not in remain and p2 not in remain:
1154 subset.append(n)
1165 subset.append(n)
1155 if heads:
1166 if heads:
1156 if p1 in heads:
1167 if p1 in heads:
1157 updated_heads[p1] = True
1168 updated_heads[p1] = True
1158 if p2 in heads:
1169 if p2 in heads:
1159 updated_heads[p2] = True
1170 updated_heads[p2] = True
1160
1171
1161 # this is the set of all roots we have to push
1172 # this is the set of all roots we have to push
1162 if heads:
1173 if heads:
1163 return subset, updated_heads.keys()
1174 return subset, updated_heads.keys()
1164 else:
1175 else:
1165 return subset
1176 return subset
1166
1177
1167 def pull(self, remote, heads=None, force=False, lock=None):
1178 def pull(self, remote, heads=None, force=False, lock=None):
1168 mylock = False
1179 mylock = False
1169 if not lock:
1180 if not lock:
1170 lock = self.lock()
1181 lock = self.lock()
1171 mylock = True
1182 mylock = True
1172
1183
1173 try:
1184 try:
1174 fetch = self.findincoming(remote, force=force)
1185 fetch = self.findincoming(remote, force=force)
1175 if fetch == [nullid]:
1186 if fetch == [nullid]:
1176 self.ui.status(_("requesting all changes\n"))
1187 self.ui.status(_("requesting all changes\n"))
1177
1188
1178 if not fetch:
1189 if not fetch:
1179 self.ui.status(_("no changes found\n"))
1190 self.ui.status(_("no changes found\n"))
1180 return 0
1191 return 0
1181
1192
1182 if heads is None:
1193 if heads is None:
1183 cg = remote.changegroup(fetch, 'pull')
1194 cg = remote.changegroup(fetch, 'pull')
1184 else:
1195 else:
1185 cg = remote.changegroupsubset(fetch, heads, 'pull')
1196 cg = remote.changegroupsubset(fetch, heads, 'pull')
1186 return self.addchangegroup(cg, 'pull', remote.url())
1197 return self.addchangegroup(cg, 'pull', remote.url())
1187 finally:
1198 finally:
1188 if mylock:
1199 if mylock:
1189 lock.release()
1200 lock.release()
1190
1201
1191 def push(self, remote, force=False, revs=None):
1202 def push(self, remote, force=False, revs=None):
1192 # there are two ways to push to remote repo:
1203 # there are two ways to push to remote repo:
1193 #
1204 #
1194 # addchangegroup assumes local user can lock remote
1205 # addchangegroup assumes local user can lock remote
1195 # repo (local filesystem, old ssh servers).
1206 # repo (local filesystem, old ssh servers).
1196 #
1207 #
1197 # unbundle assumes local user cannot lock remote repo (new ssh
1208 # unbundle assumes local user cannot lock remote repo (new ssh
1198 # servers, http servers).
1209 # servers, http servers).
1199
1210
1200 if remote.capable('unbundle'):
1211 if remote.capable('unbundle'):
1201 return self.push_unbundle(remote, force, revs)
1212 return self.push_unbundle(remote, force, revs)
1202 return self.push_addchangegroup(remote, force, revs)
1213 return self.push_addchangegroup(remote, force, revs)
1203
1214
1204 def prepush(self, remote, force, revs):
1215 def prepush(self, remote, force, revs):
1205 base = {}
1216 base = {}
1206 remote_heads = remote.heads()
1217 remote_heads = remote.heads()
1207 inc = self.findincoming(remote, base, remote_heads, force=force)
1218 inc = self.findincoming(remote, base, remote_heads, force=force)
1208 if not force and inc:
1219 if not force and inc:
1209 self.ui.warn(_("abort: unsynced remote changes!\n"))
1220 self.ui.warn(_("abort: unsynced remote changes!\n"))
1210 self.ui.status(_("(did you forget to sync?"
1221 self.ui.status(_("(did you forget to sync?"
1211 " use push -f to force)\n"))
1222 " use push -f to force)\n"))
1212 return None, 1
1223 return None, 1
1213
1224
1214 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1225 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1215 if revs is not None:
1226 if revs is not None:
1216 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1227 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1217 else:
1228 else:
1218 bases, heads = update, self.changelog.heads()
1229 bases, heads = update, self.changelog.heads()
1219
1230
1220 if not bases:
1231 if not bases:
1221 self.ui.status(_("no changes found\n"))
1232 self.ui.status(_("no changes found\n"))
1222 return None, 1
1233 return None, 1
1223 elif not force:
1234 elif not force:
1224 # FIXME we don't properly detect creation of new heads
1235 # FIXME we don't properly detect creation of new heads
1225 # in the push -r case, assume the user knows what he's doing
1236 # in the push -r case, assume the user knows what he's doing
1226 if not revs and len(remote_heads) < len(heads) \
1237 if not revs and len(remote_heads) < len(heads) \
1227 and remote_heads != [nullid]:
1238 and remote_heads != [nullid]:
1228 self.ui.warn(_("abort: push creates new remote branches!\n"))
1239 self.ui.warn(_("abort: push creates new remote branches!\n"))
1229 self.ui.status(_("(did you forget to merge?"
1240 self.ui.status(_("(did you forget to merge?"
1230 " use push -f to force)\n"))
1241 " use push -f to force)\n"))
1231 return None, 1
1242 return None, 1
1232
1243
1233 if revs is None:
1244 if revs is None:
1234 cg = self.changegroup(update, 'push')
1245 cg = self.changegroup(update, 'push')
1235 else:
1246 else:
1236 cg = self.changegroupsubset(update, revs, 'push')
1247 cg = self.changegroupsubset(update, revs, 'push')
1237 return cg, remote_heads
1248 return cg, remote_heads
1238
1249
1239 def push_addchangegroup(self, remote, force, revs):
1250 def push_addchangegroup(self, remote, force, revs):
1240 lock = remote.lock()
1251 lock = remote.lock()
1241
1252
1242 ret = self.prepush(remote, force, revs)
1253 ret = self.prepush(remote, force, revs)
1243 if ret[0] is not None:
1254 if ret[0] is not None:
1244 cg, remote_heads = ret
1255 cg, remote_heads = ret
1245 return remote.addchangegroup(cg, 'push', self.url())
1256 return remote.addchangegroup(cg, 'push', self.url())
1246 return ret[1]
1257 return ret[1]
1247
1258
1248 def push_unbundle(self, remote, force, revs):
1259 def push_unbundle(self, remote, force, revs):
1249 # local repo finds heads on server, finds out what revs it
1260 # local repo finds heads on server, finds out what revs it
1250 # must push. once revs transferred, if server finds it has
1261 # must push. once revs transferred, if server finds it has
1251 # different heads (someone else won commit/push race), server
1262 # different heads (someone else won commit/push race), server
1252 # aborts.
1263 # aborts.
1253
1264
1254 ret = self.prepush(remote, force, revs)
1265 ret = self.prepush(remote, force, revs)
1255 if ret[0] is not None:
1266 if ret[0] is not None:
1256 cg, remote_heads = ret
1267 cg, remote_heads = ret
1257 if force: remote_heads = ['force']
1268 if force: remote_heads = ['force']
1258 return remote.unbundle(cg, remote_heads, 'push')
1269 return remote.unbundle(cg, remote_heads, 'push')
1259 return ret[1]
1270 return ret[1]
1260
1271
1261 def changegroupsubset(self, bases, heads, source):
1272 def changegroupsubset(self, bases, heads, source):
1262 """This function generates a changegroup consisting of all the nodes
1273 """This function generates a changegroup consisting of all the nodes
1263 that are descendents of any of the bases, and ancestors of any of
1274 that are descendents of any of the bases, and ancestors of any of
1264 the heads.
1275 the heads.
1265
1276
1266 It is fairly complex as determining which filenodes and which
1277 It is fairly complex as determining which filenodes and which
1267 manifest nodes need to be included for the changeset to be complete
1278 manifest nodes need to be included for the changeset to be complete
1268 is non-trivial.
1279 is non-trivial.
1269
1280
1270 Another wrinkle is doing the reverse, figuring out which changeset in
1281 Another wrinkle is doing the reverse, figuring out which changeset in
1271 the changegroup a particular filenode or manifestnode belongs to."""
1282 the changegroup a particular filenode or manifestnode belongs to."""
1272
1283
1273 self.hook('preoutgoing', throw=True, source=source)
1284 self.hook('preoutgoing', throw=True, source=source)
1274
1285
1275 # Set up some initial variables
1286 # Set up some initial variables
1276 # Make it easy to refer to self.changelog
1287 # Make it easy to refer to self.changelog
1277 cl = self.changelog
1288 cl = self.changelog
1278 # msng is short for missing - compute the list of changesets in this
1289 # msng is short for missing - compute the list of changesets in this
1279 # changegroup.
1290 # changegroup.
1280 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1291 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1281 # Some bases may turn out to be superfluous, and some heads may be
1292 # Some bases may turn out to be superfluous, and some heads may be
1282 # too. nodesbetween will return the minimal set of bases and heads
1293 # too. nodesbetween will return the minimal set of bases and heads
1283 # necessary to re-create the changegroup.
1294 # necessary to re-create the changegroup.
1284
1295
1285 # Known heads are the list of heads that it is assumed the recipient
1296 # Known heads are the list of heads that it is assumed the recipient
1286 # of this changegroup will know about.
1297 # of this changegroup will know about.
1287 knownheads = {}
1298 knownheads = {}
1288 # We assume that all parents of bases are known heads.
1299 # We assume that all parents of bases are known heads.
1289 for n in bases:
1300 for n in bases:
1290 for p in cl.parents(n):
1301 for p in cl.parents(n):
1291 if p != nullid:
1302 if p != nullid:
1292 knownheads[p] = 1
1303 knownheads[p] = 1
1293 knownheads = knownheads.keys()
1304 knownheads = knownheads.keys()
1294 if knownheads:
1305 if knownheads:
1295 # Now that we know what heads are known, we can compute which
1306 # Now that we know what heads are known, we can compute which
1296 # changesets are known. The recipient must know about all
1307 # changesets are known. The recipient must know about all
1297 # changesets required to reach the known heads from the null
1308 # changesets required to reach the known heads from the null
1298 # changeset.
1309 # changeset.
1299 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1310 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1300 junk = None
1311 junk = None
1301 # Transform the list into an ersatz set.
1312 # Transform the list into an ersatz set.
1302 has_cl_set = dict.fromkeys(has_cl_set)
1313 has_cl_set = dict.fromkeys(has_cl_set)
1303 else:
1314 else:
1304 # If there were no known heads, the recipient cannot be assumed to
1315 # If there were no known heads, the recipient cannot be assumed to
1305 # know about any changesets.
1316 # know about any changesets.
1306 has_cl_set = {}
1317 has_cl_set = {}
1307
1318
1308 # Make it easy to refer to self.manifest
1319 # Make it easy to refer to self.manifest
1309 mnfst = self.manifest
1320 mnfst = self.manifest
1310 # We don't know which manifests are missing yet
1321 # We don't know which manifests are missing yet
1311 msng_mnfst_set = {}
1322 msng_mnfst_set = {}
1312 # Nor do we know which filenodes are missing.
1323 # Nor do we know which filenodes are missing.
1313 msng_filenode_set = {}
1324 msng_filenode_set = {}
1314
1325
1315 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1326 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1316 junk = None
1327 junk = None
1317
1328
1318 # A changeset always belongs to itself, so the changenode lookup
1329 # A changeset always belongs to itself, so the changenode lookup
1319 # function for a changenode is identity.
1330 # function for a changenode is identity.
1320 def identity(x):
1331 def identity(x):
1321 return x
1332 return x
1322
1333
1323 # A function generating function. Sets up an environment for the
1334 # A function generating function. Sets up an environment for the
1324 # inner function.
1335 # inner function.
1325 def cmp_by_rev_func(revlog):
1336 def cmp_by_rev_func(revlog):
1326 # Compare two nodes by their revision number in the environment's
1337 # Compare two nodes by their revision number in the environment's
1327 # revision history. Since the revision number both represents the
1338 # revision history. Since the revision number both represents the
1328 # most efficient order to read the nodes in, and represents a
1339 # most efficient order to read the nodes in, and represents a
1329 # topological sorting of the nodes, this function is often useful.
1340 # topological sorting of the nodes, this function is often useful.
1330 def cmp_by_rev(a, b):
1341 def cmp_by_rev(a, b):
1331 return cmp(revlog.rev(a), revlog.rev(b))
1342 return cmp(revlog.rev(a), revlog.rev(b))
1332 return cmp_by_rev
1343 return cmp_by_rev
1333
1344
1334 # If we determine that a particular file or manifest node must be a
1345 # If we determine that a particular file or manifest node must be a
1335 # node that the recipient of the changegroup will already have, we can
1346 # node that the recipient of the changegroup will already have, we can
1336 # also assume the recipient will have all the parents. This function
1347 # also assume the recipient will have all the parents. This function
1337 # prunes them from the set of missing nodes.
1348 # prunes them from the set of missing nodes.
1338 def prune_parents(revlog, hasset, msngset):
1349 def prune_parents(revlog, hasset, msngset):
1339 haslst = hasset.keys()
1350 haslst = hasset.keys()
1340 haslst.sort(cmp_by_rev_func(revlog))
1351 haslst.sort(cmp_by_rev_func(revlog))
1341 for node in haslst:
1352 for node in haslst:
1342 parentlst = [p for p in revlog.parents(node) if p != nullid]
1353 parentlst = [p for p in revlog.parents(node) if p != nullid]
1343 while parentlst:
1354 while parentlst:
1344 n = parentlst.pop()
1355 n = parentlst.pop()
1345 if n not in hasset:
1356 if n not in hasset:
1346 hasset[n] = 1
1357 hasset[n] = 1
1347 p = [p for p in revlog.parents(n) if p != nullid]
1358 p = [p for p in revlog.parents(n) if p != nullid]
1348 parentlst.extend(p)
1359 parentlst.extend(p)
1349 for n in hasset:
1360 for n in hasset:
1350 msngset.pop(n, None)
1361 msngset.pop(n, None)
1351
1362
1352 # This is a function generating function used to set up an environment
1363 # This is a function generating function used to set up an environment
1353 # for the inner function to execute in.
1364 # for the inner function to execute in.
1354 def manifest_and_file_collector(changedfileset):
1365 def manifest_and_file_collector(changedfileset):
1355 # This is an information gathering function that gathers
1366 # This is an information gathering function that gathers
1356 # information from each changeset node that goes out as part of
1367 # information from each changeset node that goes out as part of
1357 # the changegroup. The information gathered is a list of which
1368 # the changegroup. The information gathered is a list of which
1358 # manifest nodes are potentially required (the recipient may
1369 # manifest nodes are potentially required (the recipient may
1359 # already have them) and total list of all files which were
1370 # already have them) and total list of all files which were
1360 # changed in any changeset in the changegroup.
1371 # changed in any changeset in the changegroup.
1361 #
1372 #
1362 # We also remember the first changenode we saw any manifest
1373 # We also remember the first changenode we saw any manifest
1363 # referenced by so we can later determine which changenode 'owns'
1374 # referenced by so we can later determine which changenode 'owns'
1364 # the manifest.
1375 # the manifest.
1365 def collect_manifests_and_files(clnode):
1376 def collect_manifests_and_files(clnode):
1366 c = cl.read(clnode)
1377 c = cl.read(clnode)
1367 for f in c[3]:
1378 for f in c[3]:
1368 # This is to make sure we only have one instance of each
1379 # This is to make sure we only have one instance of each
1369 # filename string for each filename.
1380 # filename string for each filename.
1370 changedfileset.setdefault(f, f)
1381 changedfileset.setdefault(f, f)
1371 msng_mnfst_set.setdefault(c[0], clnode)
1382 msng_mnfst_set.setdefault(c[0], clnode)
1372 return collect_manifests_and_files
1383 return collect_manifests_and_files
1373
1384
1374 # Figure out which manifest nodes (of the ones we think might be part
1385 # Figure out which manifest nodes (of the ones we think might be part
1375 # of the changegroup) the recipient must know about and remove them
1386 # of the changegroup) the recipient must know about and remove them
1376 # from the changegroup.
1387 # from the changegroup.
1377 def prune_manifests():
1388 def prune_manifests():
1378 has_mnfst_set = {}
1389 has_mnfst_set = {}
1379 for n in msng_mnfst_set:
1390 for n in msng_mnfst_set:
1380 # If a 'missing' manifest thinks it belongs to a changenode
1391 # If a 'missing' manifest thinks it belongs to a changenode
1381 # the recipient is assumed to have, obviously the recipient
1392 # the recipient is assumed to have, obviously the recipient
1382 # must have that manifest.
1393 # must have that manifest.
1383 linknode = cl.node(mnfst.linkrev(n))
1394 linknode = cl.node(mnfst.linkrev(n))
1384 if linknode in has_cl_set:
1395 if linknode in has_cl_set:
1385 has_mnfst_set[n] = 1
1396 has_mnfst_set[n] = 1
1386 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1397 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1387
1398
1388 # Use the information collected in collect_manifests_and_files to say
1399 # Use the information collected in collect_manifests_and_files to say
1389 # which changenode any manifestnode belongs to.
1400 # which changenode any manifestnode belongs to.
1390 def lookup_manifest_link(mnfstnode):
1401 def lookup_manifest_link(mnfstnode):
1391 return msng_mnfst_set[mnfstnode]
1402 return msng_mnfst_set[mnfstnode]
1392
1403
1393 # A function generating function that sets up the initial environment
1404 # A function generating function that sets up the initial environment
1394 # the inner function.
1405 # the inner function.
1395 def filenode_collector(changedfiles):
1406 def filenode_collector(changedfiles):
1396 next_rev = [0]
1407 next_rev = [0]
1397 # This gathers information from each manifestnode included in the
1408 # This gathers information from each manifestnode included in the
1398 # changegroup about which filenodes the manifest node references
1409 # changegroup about which filenodes the manifest node references
1399 # so we can include those in the changegroup too.
1410 # so we can include those in the changegroup too.
1400 #
1411 #
1401 # It also remembers which changenode each filenode belongs to. It
1412 # It also remembers which changenode each filenode belongs to. It
1402 # does this by assuming the a filenode belongs to the changenode
1413 # does this by assuming the a filenode belongs to the changenode
1403 # the first manifest that references it belongs to.
1414 # the first manifest that references it belongs to.
1404 def collect_msng_filenodes(mnfstnode):
1415 def collect_msng_filenodes(mnfstnode):
1405 r = mnfst.rev(mnfstnode)
1416 r = mnfst.rev(mnfstnode)
1406 if r == next_rev[0]:
1417 if r == next_rev[0]:
1407 # If the last rev we looked at was the one just previous,
1418 # If the last rev we looked at was the one just previous,
1408 # we only need to see a diff.
1419 # we only need to see a diff.
1409 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1420 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1410 # For each line in the delta
1421 # For each line in the delta
1411 for dline in delta.splitlines():
1422 for dline in delta.splitlines():
1412 # get the filename and filenode for that line
1423 # get the filename and filenode for that line
1413 f, fnode = dline.split('\0')
1424 f, fnode = dline.split('\0')
1414 fnode = bin(fnode[:40])
1425 fnode = bin(fnode[:40])
1415 f = changedfiles.get(f, None)
1426 f = changedfiles.get(f, None)
1416 # And if the file is in the list of files we care
1427 # And if the file is in the list of files we care
1417 # about.
1428 # about.
1418 if f is not None:
1429 if f is not None:
1419 # Get the changenode this manifest belongs to
1430 # Get the changenode this manifest belongs to
1420 clnode = msng_mnfst_set[mnfstnode]
1431 clnode = msng_mnfst_set[mnfstnode]
1421 # Create the set of filenodes for the file if
1432 # Create the set of filenodes for the file if
1422 # there isn't one already.
1433 # there isn't one already.
1423 ndset = msng_filenode_set.setdefault(f, {})
1434 ndset = msng_filenode_set.setdefault(f, {})
1424 # And set the filenode's changelog node to the
1435 # And set the filenode's changelog node to the
1425 # manifest's if it hasn't been set already.
1436 # manifest's if it hasn't been set already.
1426 ndset.setdefault(fnode, clnode)
1437 ndset.setdefault(fnode, clnode)
1427 else:
1438 else:
1428 # Otherwise we need a full manifest.
1439 # Otherwise we need a full manifest.
1429 m = mnfst.read(mnfstnode)
1440 m = mnfst.read(mnfstnode)
1430 # For every file in we care about.
1441 # For every file in we care about.
1431 for f in changedfiles:
1442 for f in changedfiles:
1432 fnode = m.get(f, None)
1443 fnode = m.get(f, None)
1433 # If it's in the manifest
1444 # If it's in the manifest
1434 if fnode is not None:
1445 if fnode is not None:
1435 # See comments above.
1446 # See comments above.
1436 clnode = msng_mnfst_set[mnfstnode]
1447 clnode = msng_mnfst_set[mnfstnode]
1437 ndset = msng_filenode_set.setdefault(f, {})
1448 ndset = msng_filenode_set.setdefault(f, {})
1438 ndset.setdefault(fnode, clnode)
1449 ndset.setdefault(fnode, clnode)
1439 # Remember the revision we hope to see next.
1450 # Remember the revision we hope to see next.
1440 next_rev[0] = r + 1
1451 next_rev[0] = r + 1
1441 return collect_msng_filenodes
1452 return collect_msng_filenodes
1442
1453
1443 # We have a list of filenodes we think we need for a file, lets remove
1454 # We have a list of filenodes we think we need for a file, lets remove
1444 # all those we now the recipient must have.
1455 # all those we now the recipient must have.
1445 def prune_filenodes(f, filerevlog):
1456 def prune_filenodes(f, filerevlog):
1446 msngset = msng_filenode_set[f]
1457 msngset = msng_filenode_set[f]
1447 hasset = {}
1458 hasset = {}
1448 # If a 'missing' filenode thinks it belongs to a changenode we
1459 # If a 'missing' filenode thinks it belongs to a changenode we
1449 # assume the recipient must have, then the recipient must have
1460 # assume the recipient must have, then the recipient must have
1450 # that filenode.
1461 # that filenode.
1451 for n in msngset:
1462 for n in msngset:
1452 clnode = cl.node(filerevlog.linkrev(n))
1463 clnode = cl.node(filerevlog.linkrev(n))
1453 if clnode in has_cl_set:
1464 if clnode in has_cl_set:
1454 hasset[n] = 1
1465 hasset[n] = 1
1455 prune_parents(filerevlog, hasset, msngset)
1466 prune_parents(filerevlog, hasset, msngset)
1456
1467
1457 # A function generator function that sets up the a context for the
1468 # A function generator function that sets up the a context for the
1458 # inner function.
1469 # inner function.
1459 def lookup_filenode_link_func(fname):
1470 def lookup_filenode_link_func(fname):
1460 msngset = msng_filenode_set[fname]
1471 msngset = msng_filenode_set[fname]
1461 # Lookup the changenode the filenode belongs to.
1472 # Lookup the changenode the filenode belongs to.
1462 def lookup_filenode_link(fnode):
1473 def lookup_filenode_link(fnode):
1463 return msngset[fnode]
1474 return msngset[fnode]
1464 return lookup_filenode_link
1475 return lookup_filenode_link
1465
1476
1466 # Now that we have all theses utility functions to help out and
1477 # Now that we have all theses utility functions to help out and
1467 # logically divide up the task, generate the group.
1478 # logically divide up the task, generate the group.
1468 def gengroup():
1479 def gengroup():
1469 # The set of changed files starts empty.
1480 # The set of changed files starts empty.
1470 changedfiles = {}
1481 changedfiles = {}
1471 # Create a changenode group generator that will call our functions
1482 # Create a changenode group generator that will call our functions
1472 # back to lookup the owning changenode and collect information.
1483 # back to lookup the owning changenode and collect information.
1473 group = cl.group(msng_cl_lst, identity,
1484 group = cl.group(msng_cl_lst, identity,
1474 manifest_and_file_collector(changedfiles))
1485 manifest_and_file_collector(changedfiles))
1475 for chnk in group:
1486 for chnk in group:
1476 yield chnk
1487 yield chnk
1477
1488
1478 # The list of manifests has been collected by the generator
1489 # The list of manifests has been collected by the generator
1479 # calling our functions back.
1490 # calling our functions back.
1480 prune_manifests()
1491 prune_manifests()
1481 msng_mnfst_lst = msng_mnfst_set.keys()
1492 msng_mnfst_lst = msng_mnfst_set.keys()
1482 # Sort the manifestnodes by revision number.
1493 # Sort the manifestnodes by revision number.
1483 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1494 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1484 # Create a generator for the manifestnodes that calls our lookup
1495 # Create a generator for the manifestnodes that calls our lookup
1485 # and data collection functions back.
1496 # and data collection functions back.
1486 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1497 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1487 filenode_collector(changedfiles))
1498 filenode_collector(changedfiles))
1488 for chnk in group:
1499 for chnk in group:
1489 yield chnk
1500 yield chnk
1490
1501
1491 # These are no longer needed, dereference and toss the memory for
1502 # These are no longer needed, dereference and toss the memory for
1492 # them.
1503 # them.
1493 msng_mnfst_lst = None
1504 msng_mnfst_lst = None
1494 msng_mnfst_set.clear()
1505 msng_mnfst_set.clear()
1495
1506
1496 changedfiles = changedfiles.keys()
1507 changedfiles = changedfiles.keys()
1497 changedfiles.sort()
1508 changedfiles.sort()
1498 # Go through all our files in order sorted by name.
1509 # Go through all our files in order sorted by name.
1499 for fname in changedfiles:
1510 for fname in changedfiles:
1500 filerevlog = self.file(fname)
1511 filerevlog = self.file(fname)
1501 # Toss out the filenodes that the recipient isn't really
1512 # Toss out the filenodes that the recipient isn't really
1502 # missing.
1513 # missing.
1503 if msng_filenode_set.has_key(fname):
1514 if msng_filenode_set.has_key(fname):
1504 prune_filenodes(fname, filerevlog)
1515 prune_filenodes(fname, filerevlog)
1505 msng_filenode_lst = msng_filenode_set[fname].keys()
1516 msng_filenode_lst = msng_filenode_set[fname].keys()
1506 else:
1517 else:
1507 msng_filenode_lst = []
1518 msng_filenode_lst = []
1508 # If any filenodes are left, generate the group for them,
1519 # If any filenodes are left, generate the group for them,
1509 # otherwise don't bother.
1520 # otherwise don't bother.
1510 if len(msng_filenode_lst) > 0:
1521 if len(msng_filenode_lst) > 0:
1511 yield changegroup.genchunk(fname)
1522 yield changegroup.genchunk(fname)
1512 # Sort the filenodes by their revision #
1523 # Sort the filenodes by their revision #
1513 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1524 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1514 # Create a group generator and only pass in a changenode
1525 # Create a group generator and only pass in a changenode
1515 # lookup function as we need to collect no information
1526 # lookup function as we need to collect no information
1516 # from filenodes.
1527 # from filenodes.
1517 group = filerevlog.group(msng_filenode_lst,
1528 group = filerevlog.group(msng_filenode_lst,
1518 lookup_filenode_link_func(fname))
1529 lookup_filenode_link_func(fname))
1519 for chnk in group:
1530 for chnk in group:
1520 yield chnk
1531 yield chnk
1521 if msng_filenode_set.has_key(fname):
1532 if msng_filenode_set.has_key(fname):
1522 # Don't need this anymore, toss it to free memory.
1533 # Don't need this anymore, toss it to free memory.
1523 del msng_filenode_set[fname]
1534 del msng_filenode_set[fname]
1524 # Signal that no more groups are left.
1535 # Signal that no more groups are left.
1525 yield changegroup.closechunk()
1536 yield changegroup.closechunk()
1526
1537
1527 if msng_cl_lst:
1538 if msng_cl_lst:
1528 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1539 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1529
1540
1530 return util.chunkbuffer(gengroup())
1541 return util.chunkbuffer(gengroup())
1531
1542
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: nodes the recipient already has; everything descending
        from them (per changelog.nodesbetween) is included in the group.
        source: opaque tag passed through to the 'preoutgoing'/'outgoing'
        hooks.

        Returns a util.chunkbuffer wrapping a generator that yields the
        changegroup stream: changelog chunks, then manifest chunks, then
        per-file sections, each section terminated by a close chunk.
        """

        # give hooks a chance to veto the operation before any work is done
        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # all nodes descending from basenodes are outgoing
        nodes = cl.nodesbetween(basenodes, None)[0]
        # set of changelog revision numbers being sent, used to filter
        # manifest/file revisions by their linkrev
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            # changelog nodes are their own link nodes
            return x

        def gennodelst(revlog):
            # yield, in revision order, the nodes of this revlog whose
            # linked changeset is part of the outgoing set
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # build a callback that records, for each changeset sent, the
            # files it touched (c[3] is the files list of a changelog entry)
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # build a callback mapping a revlog node to the changelog node
            # it is linked to (needed by revlog.group for delta headers)
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # 1) changelog chunks; side effect: fills changedfiles
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # 2) manifest chunks for the selected revisions
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # 3) one section per changed file: a chunk naming the file,
            # then its revision chunks; files with no outgoing revisions
            # are skipped entirely
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                # materialize so the emptiness test doesn't consume it
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # signal end of the file sections / end of the group
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1597
1608
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source: stream (chunk source) containing the changegroup data.
        srctype: string describing where the group came from, passed to
        the 'prechangegroup'/'pretxnchangegroup'/'changegroup'/'incoming'
        hooks.
        url: origin URL, also passed to those hooks.

        Raises util.Abort when the changelog group or a file revlog group
        in the stream is empty.
        """

        def csmap(x):
            # called per incoming changeset; returns the linkrev to assign
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # map a changelog node to its revision number, for linkrevs of
            # manifest/file revisions
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            # remembered to compute the head-count delta reported at the end
            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: last revision number before/after the changelog group
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # each file section starts with a chunk holding the file
                # name; an empty chunk terminates the stream
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # publish the appended changelog data atomically
            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # give hooks a chance to reject before the transaction commits;
            # cor+1 is the first newly added revision
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            # post-commit notifications: one 'changegroup' hook for the
            # whole group, one 'incoming' hook per added changeset
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
1693
1704
1694
1705
1695 def stream_in(self, remote):
1706 def stream_in(self, remote):
1696 fp = remote.stream_out()
1707 fp = remote.stream_out()
1697 resp = int(fp.readline())
1708 resp = int(fp.readline())
1698 if resp != 0:
1709 if resp != 0:
1699 raise util.Abort(_('operation forbidden by server'))
1710 raise util.Abort(_('operation forbidden by server'))
1700 self.ui.status(_('streaming all changes\n'))
1711 self.ui.status(_('streaming all changes\n'))
1701 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1712 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1702 self.ui.status(_('%d files to transfer, %s of data\n') %
1713 self.ui.status(_('%d files to transfer, %s of data\n') %
1703 (total_files, util.bytecount(total_bytes)))
1714 (total_files, util.bytecount(total_bytes)))
1704 start = time.time()
1715 start = time.time()
1705 for i in xrange(total_files):
1716 for i in xrange(total_files):
1706 name, size = fp.readline().split('\0', 1)
1717 name, size = fp.readline().split('\0', 1)
1707 size = int(size)
1718 size = int(size)
1708 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1719 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1709 ofp = self.opener(name, 'w')
1720 ofp = self.opener(name, 'w')
1710 for chunk in util.filechunkiter(fp, limit=size):
1721 for chunk in util.filechunkiter(fp, limit=size):
1711 ofp.write(chunk)
1722 ofp.write(chunk)
1712 ofp.close()
1723 ofp.close()
1713 elapsed = time.time() - start
1724 elapsed = time.time() - start
1714 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1725 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1715 (util.bytecount(total_bytes), elapsed,
1726 (util.bytecount(total_bytes), elapsed,
1716 util.bytecount(total_bytes / elapsed)))
1727 util.bytecount(total_bytes / elapsed)))
1717 self.reload()
1728 self.reload()
1718 return len(self.heads()) + 1
1729 return len(self.heads()) + 1
1719
1730
1720 def clone(self, remote, heads=[], stream=False):
1731 def clone(self, remote, heads=[], stream=False):
1721 '''clone remote repository.
1732 '''clone remote repository.
1722
1733
1723 keyword arguments:
1734 keyword arguments:
1724 heads: list of revs to clone (forces use of pull)
1735 heads: list of revs to clone (forces use of pull)
1725 stream: use streaming clone if possible'''
1736 stream: use streaming clone if possible'''
1726
1737
1727 # now, all clients that can request uncompressed clones can
1738 # now, all clients that can request uncompressed clones can
1728 # read repo formats supported by all servers that can serve
1739 # read repo formats supported by all servers that can serve
1729 # them.
1740 # them.
1730
1741
1731 # if revlog format changes, client will have to check version
1742 # if revlog format changes, client will have to check version
1732 # and format flags on "stream" capability, and use
1743 # and format flags on "stream" capability, and use
1733 # uncompressed only if compatible.
1744 # uncompressed only if compatible.
1734
1745
1735 if stream and not heads and remote.capable('stream'):
1746 if stream and not heads and remote.capable('stream'):
1736 return self.stream_in(remote)
1747 return self.stream_in(remote)
1737 return self.pull(remote, heads)
1748 return self.pull(remote, heads)
1738
1749
def aftertrans(base):
    """Return a callback that renames journal files to their undo names.

    Implemented as a plain closure (rather than a repository method) to
    avoid circular references, so destructors work.
    """
    def a():
        # promote the transaction journal to the undo files
        for src, dst in (("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(base, src), os.path.join(base, dst))
    return a
1747
1758
def instance(ui, path, create):
    """Open (optionally creating) the local repository at path.

    Strips a leading 'file' scheme from path before handing it to
    localrepository.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1750
1761
def islocal(path):
    """Return True: every repository handled by this module is local."""
    # no remote access is ever needed for a filesystem path
    return True
General Comments 0
You need to be logged in to leave comments. Login now