##// END OF EJS Templates
Demote a pull note to a debug message
Matt Mackall -
r2965:96d034d0 default
parent child Browse files
Show More
@@ -1,1748 +1,1748 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
class localrepository(repo.repository):
    """Read/write repository class for a Mercurial repository on
    local disk (the .hg directory and working directory)."""

    # no optional wire-protocol capabilities are advertised by default
    capabilities = ()
    def __del__(self):
        # drop our reference to any still-open transaction handle when
        # the repository object is garbage collected
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        parentui: ui object the repository-level ui is derived from.
        path: repository root directory; if None, search upward from the
              current working directory for a directory containing '.hg'.
        create: if true, create the on-disk repository structure.

        Raises repo.RepoError if no repository is found or the named one
        does not exist.
        """
        repo.repository.__init__(self)
        if not path:
            # walk up from the cwd until a '.hg' directory is found
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding a repo
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener/wopener open files relative to .hg/ and the working
        # directory respectively
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repository need not have an hgrc
            pass

        # determine revlog format version and flags from configuration
        v = self.ui.revlogopts
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily populated caches (see tags(), nodetags(), wread(),
        # wwrite()); transhandle tracks the currently open transaction
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            if not os.path.exists(path):
                os.mkdir(path)
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
85
85
86 def url(self):
86 def url(self):
87 return 'file:' + self.root
87 return 'file:' + self.root
88
88
    def hook(self, name, throw=False, **args):
        """Run all hooks configured for name from the [hooks] section.

        Entries whose key (before any '.suffix') equals name are run in
        sorted key order.  Values starting with 'python:' are called
        in-process; anything else runs as a shell command.  args are
        passed to python hooks as keyword arguments and to shell hooks
        as HG_* environment variables.  If throw is true a failing hook
        raises util.Abort; otherwise failures only produce warnings.
        Returns the or-ed failure status of all hooks run.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            # split 'module.attr...' into module name and attribute path
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the dotted attribute path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0] % exc.args[1:]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                # report the traceback, then treat the hook as failed
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            '''run shell hook cmd with args exported as HG_* variables'''
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # collect matching non-empty hook entries, run in sorted order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
169
169
    # characters that may not appear in a tag name (enforced by tag())
    tag_disallowed = ':\r\n'
171
171
    def tag(self, name, node, local=False, message=None, user=None, date=None):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # reject characters that would corrupt the tags file format
        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        # a pretag hook may veto the tag (throw=True)
        self.hook('pretag', throw=True, node=node, tag=name, local=local)

        if local:
            # local tags live in .hg/localtags and are never committed
            self.opener('localtags', 'a').write('%s %s\n' % (node, name))
            self.hook('tag', node=node, tag=name, local=local)
            return

        # refuse to commit over pending .hgtags changes; status()[:5] is
        # (modified, added, removed, deleted, unknown)
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.wfile('.hgtags', 'ab').write('%s %s\n' % (node, name))
        if self.dirstate.state('.hgtags') == '?':
            # start tracking .hgtags if it is not tracked yet
            self.add(['.hgtags'])

        if not message:
            message = _('Added tag %s for changeset %s') % (name, node)

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=node, tag=name, local=local)
215
215
216 def tags(self):
216 def tags(self):
217 '''return a mapping of tag to node'''
217 '''return a mapping of tag to node'''
218 if not self.tagscache:
218 if not self.tagscache:
219 self.tagscache = {}
219 self.tagscache = {}
220
220
221 def parsetag(line, context):
221 def parsetag(line, context):
222 if not line:
222 if not line:
223 return
223 return
224 s = l.split(" ", 1)
224 s = l.split(" ", 1)
225 if len(s) != 2:
225 if len(s) != 2:
226 self.ui.warn(_("%s: cannot parse entry\n") % context)
226 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 return
227 return
228 node, key = s
228 node, key = s
229 key = key.strip()
229 key = key.strip()
230 try:
230 try:
231 bin_n = bin(node)
231 bin_n = bin(node)
232 except TypeError:
232 except TypeError:
233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 (context, node))
234 (context, node))
235 return
235 return
236 if bin_n not in self.changelog.nodemap:
236 if bin_n not in self.changelog.nodemap:
237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 (context, key))
238 (context, key))
239 return
239 return
240 self.tagscache[key] = bin_n
240 self.tagscache[key] = bin_n
241
241
242 # read the tags file from each head, ending with the tip,
242 # read the tags file from each head, ending with the tip,
243 # and add each tag found to the map, with "newer" ones
243 # and add each tag found to the map, with "newer" ones
244 # taking precedence
244 # taking precedence
245 heads = self.heads()
245 heads = self.heads()
246 heads.reverse()
246 heads.reverse()
247 fl = self.file(".hgtags")
247 fl = self.file(".hgtags")
248 for node in heads:
248 for node in heads:
249 change = self.changelog.read(node)
249 change = self.changelog.read(node)
250 rev = self.changelog.rev(node)
250 rev = self.changelog.rev(node)
251 fn, ff = self.manifest.find(change[0], '.hgtags')
251 fn, ff = self.manifest.find(change[0], '.hgtags')
252 if fn is None: continue
252 if fn is None: continue
253 count = 0
253 count = 0
254 for l in fl.read(fn).splitlines():
254 for l in fl.read(fn).splitlines():
255 count += 1
255 count += 1
256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 (rev, short(node), count))
257 (rev, short(node), count))
258 try:
258 try:
259 f = self.opener("localtags")
259 f = self.opener("localtags")
260 count = 0
260 count = 0
261 for l in f:
261 for l in f:
262 count += 1
262 count += 1
263 parsetag(l, _("localtags, line %d") % count)
263 parsetag(l, _("localtags, line %d") % count)
264 except IOError:
264 except IOError:
265 pass
265 pass
266
266
267 self.tagscache['tip'] = self.changelog.tip()
267 self.tagscache['tip'] = self.changelog.tip()
268
268
269 return self.tagscache
269 return self.tagscache
270
270
271 def tagslist(self):
271 def tagslist(self):
272 '''return a list of tags ordered by revision'''
272 '''return a list of tags ordered by revision'''
273 l = []
273 l = []
274 for t, n in self.tags().items():
274 for t, n in self.tags().items():
275 try:
275 try:
276 r = self.changelog.rev(n)
276 r = self.changelog.rev(n)
277 except:
277 except:
278 r = -2 # sort to the beginning of the list if unknown
278 r = -2 # sort to the beginning of the list if unknown
279 l.append((r, t, n))
279 l.append((r, t, n))
280 l.sort()
280 l.sort()
281 return [(t, n) for r, t, n in l]
281 return [(t, n) for r, t, n in l]
282
282
283 def nodetags(self, node):
283 def nodetags(self, node):
284 '''return the tags associated with a node'''
284 '''return the tags associated with a node'''
285 if not self.nodetagscache:
285 if not self.nodetagscache:
286 self.nodetagscache = {}
286 self.nodetagscache = {}
287 for t, n in self.tags().items():
287 for t, n in self.tags().items():
288 self.nodetagscache.setdefault(n, []).append(t)
288 self.nodetagscache.setdefault(n, []).append(t)
289 return self.nodetagscache.get(node, [])
289 return self.nodetagscache.get(node, [])
290
290
    def lookup(self, key):
        """Resolve key (a tag name, '.', or anything the changelog can
        look up) to a changelog node.

        Raises repo.RepoError if nothing matches or, for '.', if no
        revision is checked out.
        """
        try:
            return self.tags()[key]
        except KeyError:
            if key == '.':
                # '.' names the working directory's first parent
                key = self.dirstate.parents()[0]
                if key == nullid:
                    raise repo.RepoError(_("no revision checked out"))
            try:
                return self.changelog.lookup(key)
            except:
                raise repo.RepoError(_("unknown revision '%s'") % key)
303
303
    def dev(self):
        # device number of the filesystem holding the .hg directory
        return os.lstat(self.path).st_dev
306
306
    def local(self):
        # this is a local (direct filesystem access) repository
        return True
309
309
    def join(self, f):
        # path to f inside the .hg directory
        return os.path.join(self.path, f)
312
312
    def wjoin(self, f):
        # path to f inside the working directory
        return os.path.join(self.root, f)
315
315
316 def file(self, f):
316 def file(self, f):
317 if f[0] == '/':
317 if f[0] == '/':
318 f = f[1:]
318 f = f[1:]
319 return filelog.filelog(self.opener, f, self.revlogversion)
319 return filelog.filelog(self.opener, f, self.revlogversion)
320
320
    def changectx(self, changeid):
        """Return the change context for changeid (see context.changectx)."""
        return context.changectx(self, changeid)
323
323
    def filectx(self, path, changeid=None, fileid=None):
        """Return a file context for path.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
328
328
    def getcwd(self):
        # current directory as tracked by the dirstate (presumably
        # relative to the repository root -- see dirstate.getcwd)
        return self.dirstate.getcwd()
331
331
    def wfile(self, f, mode='r'):
        # open f relative to the working directory
        return self.wopener(f, mode)
334
334
335 def wread(self, filename):
335 def wread(self, filename):
336 if self.encodepats == None:
336 if self.encodepats == None:
337 l = []
337 l = []
338 for pat, cmd in self.ui.configitems("encode"):
338 for pat, cmd in self.ui.configitems("encode"):
339 mf = util.matcher(self.root, "", [pat], [], [])[1]
339 mf = util.matcher(self.root, "", [pat], [], [])[1]
340 l.append((mf, cmd))
340 l.append((mf, cmd))
341 self.encodepats = l
341 self.encodepats = l
342
342
343 data = self.wopener(filename, 'r').read()
343 data = self.wopener(filename, 'r').read()
344
344
345 for mf, cmd in self.encodepats:
345 for mf, cmd in self.encodepats:
346 if mf(filename):
346 if mf(filename):
347 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
347 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
348 data = util.filter(data, cmd)
348 data = util.filter(data, cmd)
349 break
349 break
350
350
351 return data
351 return data
352
352
353 def wwrite(self, filename, data, fd=None):
353 def wwrite(self, filename, data, fd=None):
354 if self.decodepats == None:
354 if self.decodepats == None:
355 l = []
355 l = []
356 for pat, cmd in self.ui.configitems("decode"):
356 for pat, cmd in self.ui.configitems("decode"):
357 mf = util.matcher(self.root, "", [pat], [], [])[1]
357 mf = util.matcher(self.root, "", [pat], [], [])[1]
358 l.append((mf, cmd))
358 l.append((mf, cmd))
359 self.decodepats = l
359 self.decodepats = l
360
360
361 for mf, cmd in self.decodepats:
361 for mf, cmd in self.decodepats:
362 if mf(filename):
362 if mf(filename):
363 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
363 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
364 data = util.filter(data, cmd)
364 data = util.filter(data, cmd)
365 break
365 break
366
366
367 if fd:
367 if fd:
368 return fd.write(data)
368 return fd.write(data)
369 return self.wopener(filename, 'w').write(data)
369 return self.wopener(filename, 'w').write(data)
370
370
    def transaction(self):
        """Return a transaction handle for the repository store.

        If a transaction is already running, return a nested handle on
        it instead of opening a second journal.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet -- journal an empty one
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
388
388
389 def recover(self):
389 def recover(self):
390 l = self.lock()
390 l = self.lock()
391 if os.path.exists(self.join("journal")):
391 if os.path.exists(self.join("journal")):
392 self.ui.status(_("rolling back interrupted transaction\n"))
392 self.ui.status(_("rolling back interrupted transaction\n"))
393 transaction.rollback(self.opener, self.join("journal"))
393 transaction.rollback(self.opener, self.join("journal"))
394 self.reload()
394 self.reload()
395 return True
395 return True
396 else:
396 else:
397 self.ui.warn(_("no interrupted transaction available\n"))
397 self.ui.warn(_("no interrupted transaction available\n"))
398 return False
398 return False
399
399
400 def rollback(self, wlock=None):
400 def rollback(self, wlock=None):
401 if not wlock:
401 if not wlock:
402 wlock = self.wlock()
402 wlock = self.wlock()
403 l = self.lock()
403 l = self.lock()
404 if os.path.exists(self.join("undo")):
404 if os.path.exists(self.join("undo")):
405 self.ui.status(_("rolling back last transaction\n"))
405 self.ui.status(_("rolling back last transaction\n"))
406 transaction.rollback(self.opener, self.join("undo"))
406 transaction.rollback(self.opener, self.join("undo"))
407 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
407 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
408 self.reload()
408 self.reload()
409 self.wreload()
409 self.wreload()
410 else:
410 else:
411 self.ui.warn(_("no rollback information available\n"))
411 self.ui.warn(_("no rollback information available\n"))
412
412
    def wreload(self):
        # re-read the dirstate from disk (used as wlock's acquirefn)
        self.dirstate.read()
415
415
    def reload(self):
        # re-read changelog and manifest from disk and invalidate the
        # tag caches (used as lock's acquirefn)
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
421
421
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file lockname inside .hg.

        If wait is false a held lock raises lock.LockHeld immediately;
        otherwise we warn and retry with a timeout (config 'ui.timeout',
        default 600 seconds).  releasefn/acquirefn are callbacks run on
        release/after acquisition; desc names the lock in messages.
        Returns the lock object.
        """
        try:
            # first attempt: non-blocking (timeout 0)
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
438
438
    def lock(self, wait=1):
        # repository store lock; caches are reloaded once acquired
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
442
442
    def wlock(self, wait=1):
        # working-directory lock; the dirstate is written on release
        # and re-read on acquisition
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
447
447
    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        "determine whether a new filenode is needed"
        # file's nodes in each parent manifest (nullid if absent)
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                # fp1 is an ancestor of fp2: keep fp2 as sole parent
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        # a new filenode is needed, with parents fp1 and fp2
        return (None, fp1, fp2)
466
466
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit files directly with explicitly supplied parents.

        p1/p2 default to the current dirstate parents.  The dirstate is
        only updated when p1 equals the working directory's current
        first parent, i.e. when the new changeset extends the checked
        out state.  Files that cannot be read from the working directory
        are treated as removed.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])
        changed = []

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
                r = self.file(f)

                # reuse an existing filenode if the content is unchanged
                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    m1[f] = entry
                    continue

                m1[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # unreadable in the working dir: record as removed
                try:
                    del m1[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
517
517
518 def commit(self, files=None, text="", user=None, date=None,
518 def commit(self, files=None, text="", user=None, date=None,
519 match=util.always, force=False, lock=None, wlock=None,
519 match=util.always, force=False, lock=None, wlock=None,
520 force_editor=False):
520 force_editor=False):
521 commit = []
521 commit = []
522 remove = []
522 remove = []
523 changed = []
523 changed = []
524
524
525 if files:
525 if files:
526 for f in files:
526 for f in files:
527 s = self.dirstate.state(f)
527 s = self.dirstate.state(f)
528 if s in 'nmai':
528 if s in 'nmai':
529 commit.append(f)
529 commit.append(f)
530 elif s == 'r':
530 elif s == 'r':
531 remove.append(f)
531 remove.append(f)
532 else:
532 else:
533 self.ui.warn(_("%s not tracked!\n") % f)
533 self.ui.warn(_("%s not tracked!\n") % f)
534 else:
534 else:
535 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
535 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
536 commit = modified + added
536 commit = modified + added
537 remove = removed
537 remove = removed
538
538
539 p1, p2 = self.dirstate.parents()
539 p1, p2 = self.dirstate.parents()
540 c1 = self.changelog.read(p1)
540 c1 = self.changelog.read(p1)
541 c2 = self.changelog.read(p2)
541 c2 = self.changelog.read(p2)
542 m1 = self.manifest.read(c1[0]).copy()
542 m1 = self.manifest.read(c1[0]).copy()
543 m2 = self.manifest.read(c2[0])
543 m2 = self.manifest.read(c2[0])
544
544
545 if not commit and not remove and not force and p2 == nullid:
545 if not commit and not remove and not force and p2 == nullid:
546 self.ui.status(_("nothing changed\n"))
546 self.ui.status(_("nothing changed\n"))
547 return None
547 return None
548
548
549 xp1 = hex(p1)
549 xp1 = hex(p1)
550 if p2 == nullid: xp2 = ''
550 if p2 == nullid: xp2 = ''
551 else: xp2 = hex(p2)
551 else: xp2 = hex(p2)
552
552
553 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
553 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
554
554
555 if not wlock:
555 if not wlock:
556 wlock = self.wlock()
556 wlock = self.wlock()
557 if not lock:
557 if not lock:
558 lock = self.lock()
558 lock = self.lock()
559 tr = self.transaction()
559 tr = self.transaction()
560
560
561 # check in files
561 # check in files
562 new = {}
562 new = {}
563 linkrev = self.changelog.count()
563 linkrev = self.changelog.count()
564 commit.sort()
564 commit.sort()
565 for f in commit:
565 for f in commit:
566 self.ui.note(f + "\n")
566 self.ui.note(f + "\n")
567 try:
567 try:
568 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
568 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
569 t = self.wread(f)
569 t = self.wread(f)
570 except IOError:
570 except IOError:
571 self.ui.warn(_("trouble committing %s!\n") % f)
571 self.ui.warn(_("trouble committing %s!\n") % f)
572 raise
572 raise
573
573
574 r = self.file(f)
574 r = self.file(f)
575
575
576 meta = {}
576 meta = {}
577 cp = self.dirstate.copied(f)
577 cp = self.dirstate.copied(f)
578 if cp:
578 if cp:
579 meta["copy"] = cp
579 meta["copy"] = cp
580 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
580 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
581 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
581 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
582 fp1, fp2 = nullid, nullid
582 fp1, fp2 = nullid, nullid
583 else:
583 else:
584 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
584 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
585 if entry:
585 if entry:
586 new[f] = entry
586 new[f] = entry
587 continue
587 continue
588
588
589 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
589 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
590 # remember what we've added so that we can later calculate
590 # remember what we've added so that we can later calculate
591 # the files to pull from a set of changesets
591 # the files to pull from a set of changesets
592 changed.append(f)
592 changed.append(f)
593
593
594 # update manifest
594 # update manifest
595 m1.update(new)
595 m1.update(new)
596 for f in remove:
596 for f in remove:
597 if f in m1:
597 if f in m1:
598 del m1[f]
598 del m1[f]
599 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0],
599 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0],
600 (new, remove))
600 (new, remove))
601
601
602 # add changeset
602 # add changeset
603 new = new.keys()
603 new = new.keys()
604 new.sort()
604 new.sort()
605
605
606 user = user or self.ui.username()
606 user = user or self.ui.username()
607 if not text or force_editor:
607 if not text or force_editor:
608 edittext = []
608 edittext = []
609 if text:
609 if text:
610 edittext.append(text)
610 edittext.append(text)
611 edittext.append("")
611 edittext.append("")
612 if p2 != nullid:
612 if p2 != nullid:
613 edittext.append("HG: branch merge")
613 edittext.append("HG: branch merge")
614 edittext.extend(["HG: changed %s" % f for f in changed])
614 edittext.extend(["HG: changed %s" % f for f in changed])
615 edittext.extend(["HG: removed %s" % f for f in remove])
615 edittext.extend(["HG: removed %s" % f for f in remove])
616 if not changed and not remove:
616 if not changed and not remove:
617 edittext.append("HG: no files changed")
617 edittext.append("HG: no files changed")
618 edittext.append("")
618 edittext.append("")
619 # run editor in the repository root
619 # run editor in the repository root
620 olddir = os.getcwd()
620 olddir = os.getcwd()
621 os.chdir(self.root)
621 os.chdir(self.root)
622 text = self.ui.edit("\n".join(edittext), user)
622 text = self.ui.edit("\n".join(edittext), user)
623 os.chdir(olddir)
623 os.chdir(olddir)
624
624
625 lines = [line.rstrip() for line in text.rstrip().splitlines()]
625 lines = [line.rstrip() for line in text.rstrip().splitlines()]
626 while lines and not lines[0]:
626 while lines and not lines[0]:
627 del lines[0]
627 del lines[0]
628 if not lines:
628 if not lines:
629 return None
629 return None
630 text = '\n'.join(lines)
630 text = '\n'.join(lines)
631 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
631 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
632 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
632 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
633 parent2=xp2)
633 parent2=xp2)
634 tr.close()
634 tr.close()
635
635
636 self.dirstate.setparents(n)
636 self.dirstate.setparents(n)
637 self.dirstate.update(new, "n")
637 self.dirstate.update(new, "n")
638 self.dirstate.forget(remove)
638 self.dirstate.forget(remove)
639
639
640 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
640 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
641 return n
641 return n
642
642
643 def walk(self, node=None, files=[], match=util.always, badmatch=None):
643 def walk(self, node=None, files=[], match=util.always, badmatch=None):
644 if node:
644 if node:
645 fdict = dict.fromkeys(files)
645 fdict = dict.fromkeys(files)
646 for fn in self.manifest.read(self.changelog.read(node)[0]):
646 for fn in self.manifest.read(self.changelog.read(node)[0]):
647 fdict.pop(fn, None)
647 fdict.pop(fn, None)
648 if match(fn):
648 if match(fn):
649 yield 'm', fn
649 yield 'm', fn
650 for fn in fdict:
650 for fn in fdict:
651 if badmatch and badmatch(fn):
651 if badmatch and badmatch(fn):
652 if match(fn):
652 if match(fn):
653 yield 'b', fn
653 yield 'b', fn
654 else:
654 else:
655 self.ui.warn(_('%s: No such file in rev %s\n') % (
655 self.ui.warn(_('%s: No such file in rev %s\n') % (
656 util.pathto(self.getcwd(), fn), short(node)))
656 util.pathto(self.getcwd(), fn), short(node)))
657 else:
657 else:
658 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
658 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
659 yield src, fn
659 yield src, fn
660
660
661 def status(self, node1=None, node2=None, files=[], match=util.always,
661 def status(self, node1=None, node2=None, files=[], match=util.always,
662 wlock=None, list_ignored=False, list_clean=False):
662 wlock=None, list_ignored=False, list_clean=False):
663 """return status of files between two nodes or node and working directory
663 """return status of files between two nodes or node and working directory
664
664
665 If node1 is None, use the first dirstate parent instead.
665 If node1 is None, use the first dirstate parent instead.
666 If node2 is None, compare node1 with working directory.
666 If node2 is None, compare node1 with working directory.
667 """
667 """
668
668
669 def fcmp(fn, mf):
669 def fcmp(fn, mf):
670 t1 = self.wread(fn)
670 t1 = self.wread(fn)
671 return self.file(fn).cmp(mf.get(fn, nullid), t1)
671 return self.file(fn).cmp(mf.get(fn, nullid), t1)
672
672
673 def mfmatches(node):
673 def mfmatches(node):
674 change = self.changelog.read(node)
674 change = self.changelog.read(node)
675 mf = dict(self.manifest.read(change[0]))
675 mf = dict(self.manifest.read(change[0]))
676 for fn in mf.keys():
676 for fn in mf.keys():
677 if not match(fn):
677 if not match(fn):
678 del mf[fn]
678 del mf[fn]
679 return mf
679 return mf
680
680
681 modified, added, removed, deleted, unknown = [], [], [], [], []
681 modified, added, removed, deleted, unknown = [], [], [], [], []
682 ignored, clean = [], []
682 ignored, clean = [], []
683
683
684 compareworking = False
684 compareworking = False
685 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
685 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
686 compareworking = True
686 compareworking = True
687
687
688 if not compareworking:
688 if not compareworking:
689 # read the manifest from node1 before the manifest from node2,
689 # read the manifest from node1 before the manifest from node2,
690 # so that we'll hit the manifest cache if we're going through
690 # so that we'll hit the manifest cache if we're going through
691 # all the revisions in parent->child order.
691 # all the revisions in parent->child order.
692 mf1 = mfmatches(node1)
692 mf1 = mfmatches(node1)
693
693
694 # are we comparing the working directory?
694 # are we comparing the working directory?
695 if not node2:
695 if not node2:
696 if not wlock:
696 if not wlock:
697 try:
697 try:
698 wlock = self.wlock(wait=0)
698 wlock = self.wlock(wait=0)
699 except lock.LockException:
699 except lock.LockException:
700 wlock = None
700 wlock = None
701 (lookup, modified, added, removed, deleted, unknown,
701 (lookup, modified, added, removed, deleted, unknown,
702 ignored, clean) = self.dirstate.status(files, match,
702 ignored, clean) = self.dirstate.status(files, match,
703 list_ignored, list_clean)
703 list_ignored, list_clean)
704
704
705 # are we comparing working dir against its parent?
705 # are we comparing working dir against its parent?
706 if compareworking:
706 if compareworking:
707 if lookup:
707 if lookup:
708 # do a full compare of any files that might have changed
708 # do a full compare of any files that might have changed
709 mf2 = mfmatches(self.dirstate.parents()[0])
709 mf2 = mfmatches(self.dirstate.parents()[0])
710 for f in lookup:
710 for f in lookup:
711 if fcmp(f, mf2):
711 if fcmp(f, mf2):
712 modified.append(f)
712 modified.append(f)
713 else:
713 else:
714 clean.append(f)
714 clean.append(f)
715 if wlock is not None:
715 if wlock is not None:
716 self.dirstate.update([f], "n")
716 self.dirstate.update([f], "n")
717 else:
717 else:
718 # we are comparing working dir against non-parent
718 # we are comparing working dir against non-parent
719 # generate a pseudo-manifest for the working dir
719 # generate a pseudo-manifest for the working dir
720 mf2 = mfmatches(self.dirstate.parents()[0])
720 mf2 = mfmatches(self.dirstate.parents()[0])
721 for f in lookup + modified + added:
721 for f in lookup + modified + added:
722 mf2[f] = ""
722 mf2[f] = ""
723 for f in removed:
723 for f in removed:
724 if f in mf2:
724 if f in mf2:
725 del mf2[f]
725 del mf2[f]
726 else:
726 else:
727 # we are comparing two revisions
727 # we are comparing two revisions
728 mf2 = mfmatches(node2)
728 mf2 = mfmatches(node2)
729
729
730 if not compareworking:
730 if not compareworking:
731 # flush lists from dirstate before comparing manifests
731 # flush lists from dirstate before comparing manifests
732 modified, added, clean = [], [], []
732 modified, added, clean = [], [], []
733
733
734 # make sure to sort the files so we talk to the disk in a
734 # make sure to sort the files so we talk to the disk in a
735 # reasonable order
735 # reasonable order
736 mf2keys = mf2.keys()
736 mf2keys = mf2.keys()
737 mf2keys.sort()
737 mf2keys.sort()
738 for fn in mf2keys:
738 for fn in mf2keys:
739 if mf1.has_key(fn):
739 if mf1.has_key(fn):
740 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
740 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
741 modified.append(fn)
741 modified.append(fn)
742 elif list_clean:
742 elif list_clean:
743 clean.append(fn)
743 clean.append(fn)
744 del mf1[fn]
744 del mf1[fn]
745 else:
745 else:
746 added.append(fn)
746 added.append(fn)
747
747
748 removed = mf1.keys()
748 removed = mf1.keys()
749
749
750 # sort and return results:
750 # sort and return results:
751 for l in modified, added, removed, deleted, unknown, ignored, clean:
751 for l in modified, added, removed, deleted, unknown, ignored, clean:
752 l.sort()
752 l.sort()
753 return (modified, added, removed, deleted, unknown, ignored, clean)
753 return (modified, added, removed, deleted, unknown, ignored, clean)
754
754
755 def add(self, list, wlock=None):
755 def add(self, list, wlock=None):
756 if not wlock:
756 if not wlock:
757 wlock = self.wlock()
757 wlock = self.wlock()
758 for f in list:
758 for f in list:
759 p = self.wjoin(f)
759 p = self.wjoin(f)
760 if not os.path.exists(p):
760 if not os.path.exists(p):
761 self.ui.warn(_("%s does not exist!\n") % f)
761 self.ui.warn(_("%s does not exist!\n") % f)
762 elif not os.path.isfile(p):
762 elif not os.path.isfile(p):
763 self.ui.warn(_("%s not added: only files supported currently\n")
763 self.ui.warn(_("%s not added: only files supported currently\n")
764 % f)
764 % f)
765 elif self.dirstate.state(f) in 'an':
765 elif self.dirstate.state(f) in 'an':
766 self.ui.warn(_("%s already tracked!\n") % f)
766 self.ui.warn(_("%s already tracked!\n") % f)
767 else:
767 else:
768 self.dirstate.update([f], "a")
768 self.dirstate.update([f], "a")
769
769
770 def forget(self, list, wlock=None):
770 def forget(self, list, wlock=None):
771 if not wlock:
771 if not wlock:
772 wlock = self.wlock()
772 wlock = self.wlock()
773 for f in list:
773 for f in list:
774 if self.dirstate.state(f) not in 'ai':
774 if self.dirstate.state(f) not in 'ai':
775 self.ui.warn(_("%s not added!\n") % f)
775 self.ui.warn(_("%s not added!\n") % f)
776 else:
776 else:
777 self.dirstate.forget([f])
777 self.dirstate.forget([f])
778
778
779 def remove(self, list, unlink=False, wlock=None):
779 def remove(self, list, unlink=False, wlock=None):
780 if unlink:
780 if unlink:
781 for f in list:
781 for f in list:
782 try:
782 try:
783 util.unlink(self.wjoin(f))
783 util.unlink(self.wjoin(f))
784 except OSError, inst:
784 except OSError, inst:
785 if inst.errno != errno.ENOENT:
785 if inst.errno != errno.ENOENT:
786 raise
786 raise
787 if not wlock:
787 if not wlock:
788 wlock = self.wlock()
788 wlock = self.wlock()
789 for f in list:
789 for f in list:
790 p = self.wjoin(f)
790 p = self.wjoin(f)
791 if os.path.exists(p):
791 if os.path.exists(p):
792 self.ui.warn(_("%s still exists!\n") % f)
792 self.ui.warn(_("%s still exists!\n") % f)
793 elif self.dirstate.state(f) == 'a':
793 elif self.dirstate.state(f) == 'a':
794 self.dirstate.forget([f])
794 self.dirstate.forget([f])
795 elif f not in self.dirstate:
795 elif f not in self.dirstate:
796 self.ui.warn(_("%s not tracked!\n") % f)
796 self.ui.warn(_("%s not tracked!\n") % f)
797 else:
797 else:
798 self.dirstate.update([f], "r")
798 self.dirstate.update([f], "r")
799
799
800 def undelete(self, list, wlock=None):
800 def undelete(self, list, wlock=None):
801 p = self.dirstate.parents()[0]
801 p = self.dirstate.parents()[0]
802 mn = self.changelog.read(p)[0]
802 mn = self.changelog.read(p)[0]
803 m = self.manifest.read(mn)
803 m = self.manifest.read(mn)
804 if not wlock:
804 if not wlock:
805 wlock = self.wlock()
805 wlock = self.wlock()
806 for f in list:
806 for f in list:
807 if self.dirstate.state(f) not in "r":
807 if self.dirstate.state(f) not in "r":
808 self.ui.warn("%s not removed!\n" % f)
808 self.ui.warn("%s not removed!\n" % f)
809 else:
809 else:
810 t = self.file(f).read(m[f])
810 t = self.file(f).read(m[f])
811 self.wwrite(f, t)
811 self.wwrite(f, t)
812 util.set_exec(self.wjoin(f), m.execf(f))
812 util.set_exec(self.wjoin(f), m.execf(f))
813 self.dirstate.update([f], "n")
813 self.dirstate.update([f], "n")
814
814
815 def copy(self, source, dest, wlock=None):
815 def copy(self, source, dest, wlock=None):
816 p = self.wjoin(dest)
816 p = self.wjoin(dest)
817 if not os.path.exists(p):
817 if not os.path.exists(p):
818 self.ui.warn(_("%s does not exist!\n") % dest)
818 self.ui.warn(_("%s does not exist!\n") % dest)
819 elif not os.path.isfile(p):
819 elif not os.path.isfile(p):
820 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
820 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
821 else:
821 else:
822 if not wlock:
822 if not wlock:
823 wlock = self.wlock()
823 wlock = self.wlock()
824 if self.dirstate.state(dest) == '?':
824 if self.dirstate.state(dest) == '?':
825 self.dirstate.update([dest], "a")
825 self.dirstate.update([dest], "a")
826 self.dirstate.copy(source, dest)
826 self.dirstate.copy(source, dest)
827
827
828 def heads(self, start=None):
828 def heads(self, start=None):
829 heads = self.changelog.heads(start)
829 heads = self.changelog.heads(start)
830 # sort the output in rev descending order
830 # sort the output in rev descending order
831 heads = [(-self.changelog.rev(h), h) for h in heads]
831 heads = [(-self.changelog.rev(h), h) for h in heads]
832 heads.sort()
832 heads.sort()
833 return [n for (r, n) in heads]
833 return [n for (r, n) in heads]
834
834
835 # branchlookup returns a dict giving a list of branches for
835 # branchlookup returns a dict giving a list of branches for
836 # each head. A branch is defined as the tag of a node or
836 # each head. A branch is defined as the tag of a node or
837 # the branch of the node's parents. If a node has multiple
837 # the branch of the node's parents. If a node has multiple
838 # branch tags, tags are eliminated if they are visible from other
838 # branch tags, tags are eliminated if they are visible from other
839 # branch tags.
839 # branch tags.
840 #
840 #
841 # So, for this graph: a->b->c->d->e
841 # So, for this graph: a->b->c->d->e
842 # \ /
842 # \ /
843 # aa -----/
843 # aa -----/
844 # a has tag 2.6.12
844 # a has tag 2.6.12
845 # d has tag 2.6.13
845 # d has tag 2.6.13
846 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
846 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
847 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
847 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
848 # from the list.
848 # from the list.
849 #
849 #
850 # It is possible that more than one head will have the same branch tag.
850 # It is possible that more than one head will have the same branch tag.
851 # callers need to check the result for multiple heads under the same
851 # callers need to check the result for multiple heads under the same
852 # branch tag if that is a problem for them (ie checkout of a specific
852 # branch tag if that is a problem for them (ie checkout of a specific
853 # branch).
853 # branch).
854 #
854 #
855 # passing in a specific branch will limit the depth of the search
855 # passing in a specific branch will limit the depth of the search
856 # through the parents. It won't limit the branches returned in the
856 # through the parents. It won't limit the branches returned in the
857 # result though.
857 # result though.
858 def branchlookup(self, heads=None, branch=None):
858 def branchlookup(self, heads=None, branch=None):
859 if not heads:
859 if not heads:
860 heads = self.heads()
860 heads = self.heads()
861 headt = [ h for h in heads ]
861 headt = [ h for h in heads ]
862 chlog = self.changelog
862 chlog = self.changelog
863 branches = {}
863 branches = {}
864 merges = []
864 merges = []
865 seenmerge = {}
865 seenmerge = {}
866
866
867 # traverse the tree once for each head, recording in the branches
867 # traverse the tree once for each head, recording in the branches
868 # dict which tags are visible from this head. The branches
868 # dict which tags are visible from this head. The branches
869 # dict also records which tags are visible from each tag
869 # dict also records which tags are visible from each tag
870 # while we traverse.
870 # while we traverse.
871 while headt or merges:
871 while headt or merges:
872 if merges:
872 if merges:
873 n, found = merges.pop()
873 n, found = merges.pop()
874 visit = [n]
874 visit = [n]
875 else:
875 else:
876 h = headt.pop()
876 h = headt.pop()
877 visit = [h]
877 visit = [h]
878 found = [h]
878 found = [h]
879 seen = {}
879 seen = {}
880 while visit:
880 while visit:
881 n = visit.pop()
881 n = visit.pop()
882 if n in seen:
882 if n in seen:
883 continue
883 continue
884 pp = chlog.parents(n)
884 pp = chlog.parents(n)
885 tags = self.nodetags(n)
885 tags = self.nodetags(n)
886 if tags:
886 if tags:
887 for x in tags:
887 for x in tags:
888 if x == 'tip':
888 if x == 'tip':
889 continue
889 continue
890 for f in found:
890 for f in found:
891 branches.setdefault(f, {})[n] = 1
891 branches.setdefault(f, {})[n] = 1
892 branches.setdefault(n, {})[n] = 1
892 branches.setdefault(n, {})[n] = 1
893 break
893 break
894 if n not in found:
894 if n not in found:
895 found.append(n)
895 found.append(n)
896 if branch in tags:
896 if branch in tags:
897 continue
897 continue
898 seen[n] = 1
898 seen[n] = 1
899 if pp[1] != nullid and n not in seenmerge:
899 if pp[1] != nullid and n not in seenmerge:
900 merges.append((pp[1], [x for x in found]))
900 merges.append((pp[1], [x for x in found]))
901 seenmerge[n] = 1
901 seenmerge[n] = 1
902 if pp[0] != nullid:
902 if pp[0] != nullid:
903 visit.append(pp[0])
903 visit.append(pp[0])
904 # traverse the branches dict, eliminating branch tags from each
904 # traverse the branches dict, eliminating branch tags from each
905 # head that are visible from another branch tag for that head.
905 # head that are visible from another branch tag for that head.
906 out = {}
906 out = {}
907 viscache = {}
907 viscache = {}
908 for h in heads:
908 for h in heads:
909 def visible(node):
909 def visible(node):
910 if node in viscache:
910 if node in viscache:
911 return viscache[node]
911 return viscache[node]
912 ret = {}
912 ret = {}
913 visit = [node]
913 visit = [node]
914 while visit:
914 while visit:
915 x = visit.pop()
915 x = visit.pop()
916 if x in viscache:
916 if x in viscache:
917 ret.update(viscache[x])
917 ret.update(viscache[x])
918 elif x not in ret:
918 elif x not in ret:
919 ret[x] = 1
919 ret[x] = 1
920 if x in branches:
920 if x in branches:
921 visit[len(visit):] = branches[x].keys()
921 visit[len(visit):] = branches[x].keys()
922 viscache[node] = ret
922 viscache[node] = ret
923 return ret
923 return ret
924 if h not in branches:
924 if h not in branches:
925 continue
925 continue
926 # O(n^2), but somewhat limited. This only searches the
926 # O(n^2), but somewhat limited. This only searches the
927 # tags visible from a specific head, not all the tags in the
927 # tags visible from a specific head, not all the tags in the
928 # whole repo.
928 # whole repo.
929 for b in branches[h]:
929 for b in branches[h]:
930 vis = False
930 vis = False
931 for bb in branches[h].keys():
931 for bb in branches[h].keys():
932 if b != bb:
932 if b != bb:
933 if b in visible(bb):
933 if b in visible(bb):
934 vis = True
934 vis = True
935 break
935 break
936 if not vis:
936 if not vis:
937 l = out.setdefault(h, [])
937 l = out.setdefault(h, [])
938 l[len(l):] = self.nodetags(b)
938 l[len(l):] = self.nodetags(b)
939 return out
939 return out
940
940
941 def branches(self, nodes):
941 def branches(self, nodes):
942 if not nodes:
942 if not nodes:
943 nodes = [self.changelog.tip()]
943 nodes = [self.changelog.tip()]
944 b = []
944 b = []
945 for n in nodes:
945 for n in nodes:
946 t = n
946 t = n
947 while 1:
947 while 1:
948 p = self.changelog.parents(n)
948 p = self.changelog.parents(n)
949 if p[1] != nullid or p[0] == nullid:
949 if p[1] != nullid or p[0] == nullid:
950 b.append((t, n, p[0], p[1]))
950 b.append((t, n, p[0], p[1]))
951 break
951 break
952 n = p[0]
952 n = p[0]
953 return b
953 return b
954
954
955 def between(self, pairs):
955 def between(self, pairs):
956 r = []
956 r = []
957
957
958 for top, bottom in pairs:
958 for top, bottom in pairs:
959 n, l, i = top, [], 0
959 n, l, i = top, [], 0
960 f = 1
960 f = 1
961
961
962 while n != bottom:
962 while n != bottom:
963 p = self.changelog.parents(n)[0]
963 p = self.changelog.parents(n)[0]
964 if i == f:
964 if i == f:
965 l.append(n)
965 l.append(n)
966 f = f * 2
966 f = f * 2
967 n = p
967 n = p
968 i += 1
968 i += 1
969
969
970 r.append(l)
970 r.append(l)
971
971
972 return r
972 return r
973
973
974 def findincoming(self, remote, base=None, heads=None, force=False):
974 def findincoming(self, remote, base=None, heads=None, force=False):
975 """Return list of roots of the subsets of missing nodes from remote
975 """Return list of roots of the subsets of missing nodes from remote
976
976
977 If base dict is specified, assume that these nodes and their parents
977 If base dict is specified, assume that these nodes and their parents
978 exist on the remote side and that no child of a node of base exists
978 exist on the remote side and that no child of a node of base exists
979 in both remote and self.
979 in both remote and self.
980 Furthermore base will be updated to include the nodes that exists
980 Furthermore base will be updated to include the nodes that exists
981 in self and remote but no children exists in self and remote.
981 in self and remote but no children exists in self and remote.
982 If a list of heads is specified, return only nodes which are heads
982 If a list of heads is specified, return only nodes which are heads
983 or ancestors of these heads.
983 or ancestors of these heads.
984
984
985 All the ancestors of base are in self and in remote.
985 All the ancestors of base are in self and in remote.
986 All the descendants of the list returned are missing in self.
986 All the descendants of the list returned are missing in self.
987 (and so we know that the rest of the nodes are missing in remote, see
987 (and so we know that the rest of the nodes are missing in remote, see
988 outgoing)
988 outgoing)
989 """
989 """
990 m = self.changelog.nodemap
990 m = self.changelog.nodemap
991 search = []
991 search = []
992 fetch = {}
992 fetch = {}
993 seen = {}
993 seen = {}
994 seenbranch = {}
994 seenbranch = {}
995 if base == None:
995 if base == None:
996 base = {}
996 base = {}
997
997
998 if not heads:
998 if not heads:
999 heads = remote.heads()
999 heads = remote.heads()
1000
1000
1001 if self.changelog.tip() == nullid:
1001 if self.changelog.tip() == nullid:
1002 base[nullid] = 1
1002 base[nullid] = 1
1003 if heads != [nullid]:
1003 if heads != [nullid]:
1004 return [nullid]
1004 return [nullid]
1005 return []
1005 return []
1006
1006
1007 # assume we're closer to the tip than the root
1007 # assume we're closer to the tip than the root
1008 # and start by examining the heads
1008 # and start by examining the heads
1009 self.ui.status(_("searching for changes\n"))
1009 self.ui.status(_("searching for changes\n"))
1010
1010
1011 unknown = []
1011 unknown = []
1012 for h in heads:
1012 for h in heads:
1013 if h not in m:
1013 if h not in m:
1014 unknown.append(h)
1014 unknown.append(h)
1015 else:
1015 else:
1016 base[h] = 1
1016 base[h] = 1
1017
1017
1018 if not unknown:
1018 if not unknown:
1019 return []
1019 return []
1020
1020
1021 req = dict.fromkeys(unknown)
1021 req = dict.fromkeys(unknown)
1022 reqcnt = 0
1022 reqcnt = 0
1023
1023
1024 # search through remote branches
1024 # search through remote branches
1025 # a 'branch' here is a linear segment of history, with four parts:
1025 # a 'branch' here is a linear segment of history, with four parts:
1026 # head, root, first parent, second parent
1026 # head, root, first parent, second parent
1027 # (a branch always has two parents (or none) by definition)
1027 # (a branch always has two parents (or none) by definition)
1028 unknown = remote.branches(unknown)
1028 unknown = remote.branches(unknown)
1029 while unknown:
1029 while unknown:
1030 r = []
1030 r = []
1031 while unknown:
1031 while unknown:
1032 n = unknown.pop(0)
1032 n = unknown.pop(0)
1033 if n[0] in seen:
1033 if n[0] in seen:
1034 continue
1034 continue
1035
1035
1036 self.ui.debug(_("examining %s:%s\n")
1036 self.ui.debug(_("examining %s:%s\n")
1037 % (short(n[0]), short(n[1])))
1037 % (short(n[0]), short(n[1])))
1038 if n[0] == nullid: # found the end of the branch
1038 if n[0] == nullid: # found the end of the branch
1039 pass
1039 pass
1040 elif n in seenbranch:
1040 elif n in seenbranch:
1041 self.ui.debug(_("branch already found\n"))
1041 self.ui.debug(_("branch already found\n"))
1042 continue
1042 continue
1043 elif n[1] and n[1] in m: # do we know the base?
1043 elif n[1] and n[1] in m: # do we know the base?
1044 self.ui.debug(_("found incomplete branch %s:%s\n")
1044 self.ui.debug(_("found incomplete branch %s:%s\n")
1045 % (short(n[0]), short(n[1])))
1045 % (short(n[0]), short(n[1])))
1046 search.append(n) # schedule branch range for scanning
1046 search.append(n) # schedule branch range for scanning
1047 seenbranch[n] = 1
1047 seenbranch[n] = 1
1048 else:
1048 else:
1049 if n[1] not in seen and n[1] not in fetch:
1049 if n[1] not in seen and n[1] not in fetch:
1050 if n[2] in m and n[3] in m:
1050 if n[2] in m and n[3] in m:
1051 self.ui.debug(_("found new changeset %s\n") %
1051 self.ui.debug(_("found new changeset %s\n") %
1052 short(n[1]))
1052 short(n[1]))
1053 fetch[n[1]] = 1 # earliest unknown
1053 fetch[n[1]] = 1 # earliest unknown
1054 for p in n[2:4]:
1054 for p in n[2:4]:
1055 if p in m:
1055 if p in m:
1056 base[p] = 1 # latest known
1056 base[p] = 1 # latest known
1057
1057
1058 for p in n[2:4]:
1058 for p in n[2:4]:
1059 if p not in req and p not in m:
1059 if p not in req and p not in m:
1060 r.append(p)
1060 r.append(p)
1061 req[p] = 1
1061 req[p] = 1
1062 seen[n[0]] = 1
1062 seen[n[0]] = 1
1063
1063
1064 if r:
1064 if r:
1065 reqcnt += 1
1065 reqcnt += 1
1066 self.ui.debug(_("request %d: %s\n") %
1066 self.ui.debug(_("request %d: %s\n") %
1067 (reqcnt, " ".join(map(short, r))))
1067 (reqcnt, " ".join(map(short, r))))
1068 for p in range(0, len(r), 10):
1068 for p in range(0, len(r), 10):
1069 for b in remote.branches(r[p:p+10]):
1069 for b in remote.branches(r[p:p+10]):
1070 self.ui.debug(_("received %s:%s\n") %
1070 self.ui.debug(_("received %s:%s\n") %
1071 (short(b[0]), short(b[1])))
1071 (short(b[0]), short(b[1])))
1072 unknown.append(b)
1072 unknown.append(b)
1073
1073
1074 # do binary search on the branches we found
1074 # do binary search on the branches we found
1075 while search:
1075 while search:
1076 n = search.pop(0)
1076 n = search.pop(0)
1077 reqcnt += 1
1077 reqcnt += 1
1078 l = remote.between([(n[0], n[1])])[0]
1078 l = remote.between([(n[0], n[1])])[0]
1079 l.append(n[1])
1079 l.append(n[1])
1080 p = n[0]
1080 p = n[0]
1081 f = 1
1081 f = 1
1082 for i in l:
1082 for i in l:
1083 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1083 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1084 if i in m:
1084 if i in m:
1085 if f <= 2:
1085 if f <= 2:
1086 self.ui.debug(_("found new branch changeset %s\n") %
1086 self.ui.debug(_("found new branch changeset %s\n") %
1087 short(p))
1087 short(p))
1088 fetch[p] = 1
1088 fetch[p] = 1
1089 base[i] = 1
1089 base[i] = 1
1090 else:
1090 else:
1091 self.ui.debug(_("narrowed branch search to %s:%s\n")
1091 self.ui.debug(_("narrowed branch search to %s:%s\n")
1092 % (short(p), short(i)))
1092 % (short(p), short(i)))
1093 search.append((p, i))
1093 search.append((p, i))
1094 break
1094 break
1095 p, f = i, f * 2
1095 p, f = i, f * 2
1096
1096
1097 # sanity check our fetch list
1097 # sanity check our fetch list
1098 for f in fetch.keys():
1098 for f in fetch.keys():
1099 if f in m:
1099 if f in m:
1100 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1100 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1101
1101
1102 if base.keys() == [nullid]:
1102 if base.keys() == [nullid]:
1103 if force:
1103 if force:
1104 self.ui.warn(_("warning: repository is unrelated\n"))
1104 self.ui.warn(_("warning: repository is unrelated\n"))
1105 else:
1105 else:
1106 raise util.Abort(_("repository is unrelated"))
1106 raise util.Abort(_("repository is unrelated"))
1107
1107
1108 self.ui.note(_("found new changesets starting at ") +
1108 self.ui.debug(_("found new changesets starting at ") +
1109 " ".join([short(f) for f in fetch]) + "\n")
1109 " ".join([short(f) for f in fetch]) + "\n")
1110
1110
1111 self.ui.debug(_("%d total queries\n") % reqcnt)
1111 self.ui.debug(_("%d total queries\n") % reqcnt)
1112
1112
1113 return fetch.keys()
1113 return fetch.keys()
1114
1114
1115 def findoutgoing(self, remote, base=None, heads=None, force=False):
1115 def findoutgoing(self, remote, base=None, heads=None, force=False):
1116 """Return list of nodes that are roots of subsets not in remote
1116 """Return list of nodes that are roots of subsets not in remote
1117
1117
1118 If base dict is specified, assume that these nodes and their parents
1118 If base dict is specified, assume that these nodes and their parents
1119 exist on the remote side.
1119 exist on the remote side.
1120 If a list of heads is specified, return only nodes which are heads
1120 If a list of heads is specified, return only nodes which are heads
1121 or ancestors of these heads, and return a second element which
1121 or ancestors of these heads, and return a second element which
1122 contains all remote heads which get new children.
1122 contains all remote heads which get new children.
1123 """
1123 """
1124 if base == None:
1124 if base == None:
1125 base = {}
1125 base = {}
1126 self.findincoming(remote, base, heads, force=force)
1126 self.findincoming(remote, base, heads, force=force)
1127
1127
1128 self.ui.debug(_("common changesets up to ")
1128 self.ui.debug(_("common changesets up to ")
1129 + " ".join(map(short, base.keys())) + "\n")
1129 + " ".join(map(short, base.keys())) + "\n")
1130
1130
1131 remain = dict.fromkeys(self.changelog.nodemap)
1131 remain = dict.fromkeys(self.changelog.nodemap)
1132
1132
1133 # prune everything remote has from the tree
1133 # prune everything remote has from the tree
1134 del remain[nullid]
1134 del remain[nullid]
1135 remove = base.keys()
1135 remove = base.keys()
1136 while remove:
1136 while remove:
1137 n = remove.pop(0)
1137 n = remove.pop(0)
1138 if n in remain:
1138 if n in remain:
1139 del remain[n]
1139 del remain[n]
1140 for p in self.changelog.parents(n):
1140 for p in self.changelog.parents(n):
1141 remove.append(p)
1141 remove.append(p)
1142
1142
1143 # find every node whose parents have been pruned
1143 # find every node whose parents have been pruned
1144 subset = []
1144 subset = []
1145 # find every remote head that will get new children
1145 # find every remote head that will get new children
1146 updated_heads = {}
1146 updated_heads = {}
1147 for n in remain:
1147 for n in remain:
1148 p1, p2 = self.changelog.parents(n)
1148 p1, p2 = self.changelog.parents(n)
1149 if p1 not in remain and p2 not in remain:
1149 if p1 not in remain and p2 not in remain:
1150 subset.append(n)
1150 subset.append(n)
1151 if heads:
1151 if heads:
1152 if p1 in heads:
1152 if p1 in heads:
1153 updated_heads[p1] = True
1153 updated_heads[p1] = True
1154 if p2 in heads:
1154 if p2 in heads:
1155 updated_heads[p2] = True
1155 updated_heads[p2] = True
1156
1156
1157 # this is the set of all roots we have to push
1157 # this is the set of all roots we have to push
1158 if heads:
1158 if heads:
1159 return subset, updated_heads.keys()
1159 return subset, updated_heads.keys()
1160 else:
1160 else:
1161 return subset
1161 return subset
1162
1162
1163 def pull(self, remote, heads=None, force=False, lock=None):
1163 def pull(self, remote, heads=None, force=False, lock=None):
1164 mylock = False
1164 mylock = False
1165 if not lock:
1165 if not lock:
1166 lock = self.lock()
1166 lock = self.lock()
1167 mylock = True
1167 mylock = True
1168
1168
1169 try:
1169 try:
1170 fetch = self.findincoming(remote, force=force)
1170 fetch = self.findincoming(remote, force=force)
1171 if fetch == [nullid]:
1171 if fetch == [nullid]:
1172 self.ui.status(_("requesting all changes\n"))
1172 self.ui.status(_("requesting all changes\n"))
1173
1173
1174 if not fetch:
1174 if not fetch:
1175 self.ui.status(_("no changes found\n"))
1175 self.ui.status(_("no changes found\n"))
1176 return 0
1176 return 0
1177
1177
1178 if heads is None:
1178 if heads is None:
1179 cg = remote.changegroup(fetch, 'pull')
1179 cg = remote.changegroup(fetch, 'pull')
1180 else:
1180 else:
1181 cg = remote.changegroupsubset(fetch, heads, 'pull')
1181 cg = remote.changegroupsubset(fetch, heads, 'pull')
1182 return self.addchangegroup(cg, 'pull', remote.url())
1182 return self.addchangegroup(cg, 'pull', remote.url())
1183 finally:
1183 finally:
1184 if mylock:
1184 if mylock:
1185 lock.release()
1185 lock.release()
1186
1186
1187 def push(self, remote, force=False, revs=None):
1187 def push(self, remote, force=False, revs=None):
1188 # there are two ways to push to remote repo:
1188 # there are two ways to push to remote repo:
1189 #
1189 #
1190 # addchangegroup assumes local user can lock remote
1190 # addchangegroup assumes local user can lock remote
1191 # repo (local filesystem, old ssh servers).
1191 # repo (local filesystem, old ssh servers).
1192 #
1192 #
1193 # unbundle assumes local user cannot lock remote repo (new ssh
1193 # unbundle assumes local user cannot lock remote repo (new ssh
1194 # servers, http servers).
1194 # servers, http servers).
1195
1195
1196 if remote.capable('unbundle'):
1196 if remote.capable('unbundle'):
1197 return self.push_unbundle(remote, force, revs)
1197 return self.push_unbundle(remote, force, revs)
1198 return self.push_addchangegroup(remote, force, revs)
1198 return self.push_addchangegroup(remote, force, revs)
1199
1199
1200 def prepush(self, remote, force, revs):
1200 def prepush(self, remote, force, revs):
1201 base = {}
1201 base = {}
1202 remote_heads = remote.heads()
1202 remote_heads = remote.heads()
1203 inc = self.findincoming(remote, base, remote_heads, force=force)
1203 inc = self.findincoming(remote, base, remote_heads, force=force)
1204 if not force and inc:
1204 if not force and inc:
1205 self.ui.warn(_("abort: unsynced remote changes!\n"))
1205 self.ui.warn(_("abort: unsynced remote changes!\n"))
1206 self.ui.status(_("(did you forget to sync?"
1206 self.ui.status(_("(did you forget to sync?"
1207 " use push -f to force)\n"))
1207 " use push -f to force)\n"))
1208 return None, 1
1208 return None, 1
1209
1209
1210 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1210 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1211 if revs is not None:
1211 if revs is not None:
1212 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1212 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1213 else:
1213 else:
1214 bases, heads = update, self.changelog.heads()
1214 bases, heads = update, self.changelog.heads()
1215
1215
1216 if not bases:
1216 if not bases:
1217 self.ui.status(_("no changes found\n"))
1217 self.ui.status(_("no changes found\n"))
1218 return None, 1
1218 return None, 1
1219 elif not force:
1219 elif not force:
1220 # FIXME we don't properly detect creation of new heads
1220 # FIXME we don't properly detect creation of new heads
1221 # in the push -r case, assume the user knows what he's doing
1221 # in the push -r case, assume the user knows what he's doing
1222 if not revs and len(remote_heads) < len(heads) \
1222 if not revs and len(remote_heads) < len(heads) \
1223 and remote_heads != [nullid]:
1223 and remote_heads != [nullid]:
1224 self.ui.warn(_("abort: push creates new remote branches!\n"))
1224 self.ui.warn(_("abort: push creates new remote branches!\n"))
1225 self.ui.status(_("(did you forget to merge?"
1225 self.ui.status(_("(did you forget to merge?"
1226 " use push -f to force)\n"))
1226 " use push -f to force)\n"))
1227 return None, 1
1227 return None, 1
1228
1228
1229 if revs is None:
1229 if revs is None:
1230 cg = self.changegroup(update, 'push')
1230 cg = self.changegroup(update, 'push')
1231 else:
1231 else:
1232 cg = self.changegroupsubset(update, revs, 'push')
1232 cg = self.changegroupsubset(update, revs, 'push')
1233 return cg, remote_heads
1233 return cg, remote_heads
1234
1234
1235 def push_addchangegroup(self, remote, force, revs):
1235 def push_addchangegroup(self, remote, force, revs):
1236 lock = remote.lock()
1236 lock = remote.lock()
1237
1237
1238 ret = self.prepush(remote, force, revs)
1238 ret = self.prepush(remote, force, revs)
1239 if ret[0] is not None:
1239 if ret[0] is not None:
1240 cg, remote_heads = ret
1240 cg, remote_heads = ret
1241 return remote.addchangegroup(cg, 'push', self.url())
1241 return remote.addchangegroup(cg, 'push', self.url())
1242 return ret[1]
1242 return ret[1]
1243
1243
1244 def push_unbundle(self, remote, force, revs):
1244 def push_unbundle(self, remote, force, revs):
1245 # local repo finds heads on server, finds out what revs it
1245 # local repo finds heads on server, finds out what revs it
1246 # must push. once revs transferred, if server finds it has
1246 # must push. once revs transferred, if server finds it has
1247 # different heads (someone else won commit/push race), server
1247 # different heads (someone else won commit/push race), server
1248 # aborts.
1248 # aborts.
1249
1249
1250 ret = self.prepush(remote, force, revs)
1250 ret = self.prepush(remote, force, revs)
1251 if ret[0] is not None:
1251 if ret[0] is not None:
1252 cg, remote_heads = ret
1252 cg, remote_heads = ret
1253 if force: remote_heads = ['force']
1253 if force: remote_heads = ['force']
1254 return remote.unbundle(cg, remote_heads, 'push')
1254 return remote.unbundle(cg, remote_heads, 'push')
1255 return ret[1]
1255 return ret[1]
1256
1256
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed
            # to know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents
            # the most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often
            # useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we
        # can also assume the recipient will have all the parents.  This
        # function prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an
        # environment for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # next_rev is a one-element list so the closure can mutate it
            # (Python 2 has no 'nonlocal').
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.
            # It does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our
            # functions back to lookup the owning changenode and collect
            # information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory
            # for them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1527
1527
1528 def changegroup(self, basenodes, source):
1528 def changegroup(self, basenodes, source):
1529 """Generate a changegroup of all nodes that we have that a recipient
1529 """Generate a changegroup of all nodes that we have that a recipient
1530 doesn't.
1530 doesn't.
1531
1531
1532 This is much easier than the previous function as we can assume that
1532 This is much easier than the previous function as we can assume that
1533 the recipient has any changenode we aren't sending them."""
1533 the recipient has any changenode we aren't sending them."""
1534
1534
1535 self.hook('preoutgoing', throw=True, source=source)
1535 self.hook('preoutgoing', throw=True, source=source)
1536
1536
1537 cl = self.changelog
1537 cl = self.changelog
1538 nodes = cl.nodesbetween(basenodes, None)[0]
1538 nodes = cl.nodesbetween(basenodes, None)[0]
1539 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1539 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1540
1540
1541 def identity(x):
1541 def identity(x):
1542 return x
1542 return x
1543
1543
1544 def gennodelst(revlog):
1544 def gennodelst(revlog):
1545 for r in xrange(0, revlog.count()):
1545 for r in xrange(0, revlog.count()):
1546 n = revlog.node(r)
1546 n = revlog.node(r)
1547 if revlog.linkrev(n) in revset:
1547 if revlog.linkrev(n) in revset:
1548 yield n
1548 yield n
1549
1549
1550 def changed_file_collector(changedfileset):
1550 def changed_file_collector(changedfileset):
1551 def collect_changed_files(clnode):
1551 def collect_changed_files(clnode):
1552 c = cl.read(clnode)
1552 c = cl.read(clnode)
1553 for fname in c[3]:
1553 for fname in c[3]:
1554 changedfileset[fname] = 1
1554 changedfileset[fname] = 1
1555 return collect_changed_files
1555 return collect_changed_files
1556
1556
1557 def lookuprevlink_func(revlog):
1557 def lookuprevlink_func(revlog):
1558 def lookuprevlink(n):
1558 def lookuprevlink(n):
1559 return cl.node(revlog.linkrev(n))
1559 return cl.node(revlog.linkrev(n))
1560 return lookuprevlink
1560 return lookuprevlink
1561
1561
1562 def gengroup():
1562 def gengroup():
1563 # construct a list of all changed files
1563 # construct a list of all changed files
1564 changedfiles = {}
1564 changedfiles = {}
1565
1565
1566 for chnk in cl.group(nodes, identity,
1566 for chnk in cl.group(nodes, identity,
1567 changed_file_collector(changedfiles)):
1567 changed_file_collector(changedfiles)):
1568 yield chnk
1568 yield chnk
1569 changedfiles = changedfiles.keys()
1569 changedfiles = changedfiles.keys()
1570 changedfiles.sort()
1570 changedfiles.sort()
1571
1571
1572 mnfst = self.manifest
1572 mnfst = self.manifest
1573 nodeiter = gennodelst(mnfst)
1573 nodeiter = gennodelst(mnfst)
1574 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1574 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1575 yield chnk
1575 yield chnk
1576
1576
1577 for fname in changedfiles:
1577 for fname in changedfiles:
1578 filerevlog = self.file(fname)
1578 filerevlog = self.file(fname)
1579 nodeiter = gennodelst(filerevlog)
1579 nodeiter = gennodelst(filerevlog)
1580 nodeiter = list(nodeiter)
1580 nodeiter = list(nodeiter)
1581 if nodeiter:
1581 if nodeiter:
1582 yield changegroup.genchunk(fname)
1582 yield changegroup.genchunk(fname)
1583 lookup = lookuprevlink_func(filerevlog)
1583 lookup = lookuprevlink_func(filerevlog)
1584 for chnk in filerevlog.group(nodeiter, lookup):
1584 for chnk in filerevlog.group(nodeiter, lookup):
1585 yield chnk
1585 yield chnk
1586
1586
1587 yield changegroup.closechunk()
1587 yield changegroup.closechunk()
1588
1588
1589 if nodes:
1589 if nodes:
1590 self.hook('outgoing', node=hex(nodes[0]), source=source)
1590 self.hook('outgoing', node=hex(nodes[0]), source=source)
1591
1591
1592 return util.chunkbuffer(gengroup())
1592 return util.chunkbuffer(gengroup())
1593
1593
1594 def addchangegroup(self, source, srctype, url):
1594 def addchangegroup(self, source, srctype, url):
1595 """add changegroup to repo.
1595 """add changegroup to repo.
1596 returns number of heads modified or added + 1."""
1596 returns number of heads modified or added + 1."""
1597
1597
1598 def csmap(x):
1598 def csmap(x):
1599 self.ui.debug(_("add changeset %s\n") % short(x))
1599 self.ui.debug(_("add changeset %s\n") % short(x))
1600 return cl.count()
1600 return cl.count()
1601
1601
1602 def revmap(x):
1602 def revmap(x):
1603 return cl.rev(x)
1603 return cl.rev(x)
1604
1604
1605 if not source:
1605 if not source:
1606 return 0
1606 return 0
1607
1607
1608 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1608 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1609
1609
1610 changesets = files = revisions = 0
1610 changesets = files = revisions = 0
1611
1611
1612 tr = self.transaction()
1612 tr = self.transaction()
1613
1613
1614 # write changelog data to temp files so concurrent readers will not see
1614 # write changelog data to temp files so concurrent readers will not see
1615 # inconsistent view
1615 # inconsistent view
1616 cl = None
1616 cl = None
1617 try:
1617 try:
1618 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1618 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1619
1619
1620 oldheads = len(cl.heads())
1620 oldheads = len(cl.heads())
1621
1621
1622 # pull off the changeset group
1622 # pull off the changeset group
1623 self.ui.status(_("adding changesets\n"))
1623 self.ui.status(_("adding changesets\n"))
1624 cor = cl.count() - 1
1624 cor = cl.count() - 1
1625 chunkiter = changegroup.chunkiter(source)
1625 chunkiter = changegroup.chunkiter(source)
1626 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1626 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1627 raise util.Abort(_("received changelog group is empty"))
1627 raise util.Abort(_("received changelog group is empty"))
1628 cnr = cl.count() - 1
1628 cnr = cl.count() - 1
1629 changesets = cnr - cor
1629 changesets = cnr - cor
1630
1630
1631 # pull off the manifest group
1631 # pull off the manifest group
1632 self.ui.status(_("adding manifests\n"))
1632 self.ui.status(_("adding manifests\n"))
1633 chunkiter = changegroup.chunkiter(source)
1633 chunkiter = changegroup.chunkiter(source)
1634 # no need to check for empty manifest group here:
1634 # no need to check for empty manifest group here:
1635 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1635 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1636 # no new manifest will be created and the manifest group will
1636 # no new manifest will be created and the manifest group will
1637 # be empty during the pull
1637 # be empty during the pull
1638 self.manifest.addgroup(chunkiter, revmap, tr)
1638 self.manifest.addgroup(chunkiter, revmap, tr)
1639
1639
1640 # process the files
1640 # process the files
1641 self.ui.status(_("adding file changes\n"))
1641 self.ui.status(_("adding file changes\n"))
1642 while 1:
1642 while 1:
1643 f = changegroup.getchunk(source)
1643 f = changegroup.getchunk(source)
1644 if not f:
1644 if not f:
1645 break
1645 break
1646 self.ui.debug(_("adding %s revisions\n") % f)
1646 self.ui.debug(_("adding %s revisions\n") % f)
1647 fl = self.file(f)
1647 fl = self.file(f)
1648 o = fl.count()
1648 o = fl.count()
1649 chunkiter = changegroup.chunkiter(source)
1649 chunkiter = changegroup.chunkiter(source)
1650 if fl.addgroup(chunkiter, revmap, tr) is None:
1650 if fl.addgroup(chunkiter, revmap, tr) is None:
1651 raise util.Abort(_("received file revlog group is empty"))
1651 raise util.Abort(_("received file revlog group is empty"))
1652 revisions += fl.count() - o
1652 revisions += fl.count() - o
1653 files += 1
1653 files += 1
1654
1654
1655 cl.writedata()
1655 cl.writedata()
1656 finally:
1656 finally:
1657 if cl:
1657 if cl:
1658 cl.cleanup()
1658 cl.cleanup()
1659
1659
1660 # make changelog see real files again
1660 # make changelog see real files again
1661 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1661 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1662 self.changelog.checkinlinesize(tr)
1662 self.changelog.checkinlinesize(tr)
1663
1663
1664 newheads = len(self.changelog.heads())
1664 newheads = len(self.changelog.heads())
1665 heads = ""
1665 heads = ""
1666 if oldheads and newheads != oldheads:
1666 if oldheads and newheads != oldheads:
1667 heads = _(" (%+d heads)") % (newheads - oldheads)
1667 heads = _(" (%+d heads)") % (newheads - oldheads)
1668
1668
1669 self.ui.status(_("added %d changesets"
1669 self.ui.status(_("added %d changesets"
1670 " with %d changes to %d files%s\n")
1670 " with %d changes to %d files%s\n")
1671 % (changesets, revisions, files, heads))
1671 % (changesets, revisions, files, heads))
1672
1672
1673 if changesets > 0:
1673 if changesets > 0:
1674 self.hook('pretxnchangegroup', throw=True,
1674 self.hook('pretxnchangegroup', throw=True,
1675 node=hex(self.changelog.node(cor+1)), source=srctype,
1675 node=hex(self.changelog.node(cor+1)), source=srctype,
1676 url=url)
1676 url=url)
1677
1677
1678 tr.close()
1678 tr.close()
1679
1679
1680 if changesets > 0:
1680 if changesets > 0:
1681 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1681 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1682 source=srctype, url=url)
1682 source=srctype, url=url)
1683
1683
1684 for i in range(cor + 1, cnr + 1):
1684 for i in range(cor + 1, cnr + 1):
1685 self.hook("incoming", node=hex(self.changelog.node(i)),
1685 self.hook("incoming", node=hex(self.changelog.node(i)),
1686 source=srctype, url=url)
1686 source=srctype, url=url)
1687
1687
1688 return newheads - oldheads + 1
1688 return newheads - oldheads + 1
1689
1689
1690
1690
1691 def stream_in(self, remote):
1691 def stream_in(self, remote):
1692 fp = remote.stream_out()
1692 fp = remote.stream_out()
1693 resp = int(fp.readline())
1693 resp = int(fp.readline())
1694 if resp != 0:
1694 if resp != 0:
1695 raise util.Abort(_('operation forbidden by server'))
1695 raise util.Abort(_('operation forbidden by server'))
1696 self.ui.status(_('streaming all changes\n'))
1696 self.ui.status(_('streaming all changes\n'))
1697 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1697 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1698 self.ui.status(_('%d files to transfer, %s of data\n') %
1698 self.ui.status(_('%d files to transfer, %s of data\n') %
1699 (total_files, util.bytecount(total_bytes)))
1699 (total_files, util.bytecount(total_bytes)))
1700 start = time.time()
1700 start = time.time()
1701 for i in xrange(total_files):
1701 for i in xrange(total_files):
1702 name, size = fp.readline().split('\0', 1)
1702 name, size = fp.readline().split('\0', 1)
1703 size = int(size)
1703 size = int(size)
1704 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1704 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1705 ofp = self.opener(name, 'w')
1705 ofp = self.opener(name, 'w')
1706 for chunk in util.filechunkiter(fp, limit=size):
1706 for chunk in util.filechunkiter(fp, limit=size):
1707 ofp.write(chunk)
1707 ofp.write(chunk)
1708 ofp.close()
1708 ofp.close()
1709 elapsed = time.time() - start
1709 elapsed = time.time() - start
1710 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1710 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1711 (util.bytecount(total_bytes), elapsed,
1711 (util.bytecount(total_bytes), elapsed,
1712 util.bytecount(total_bytes / elapsed)))
1712 util.bytecount(total_bytes / elapsed)))
1713 self.reload()
1713 self.reload()
1714 return len(self.heads()) + 1
1714 return len(self.heads()) + 1
1715
1715
1716 def clone(self, remote, heads=[], stream=False):
1716 def clone(self, remote, heads=[], stream=False):
1717 '''clone remote repository.
1717 '''clone remote repository.
1718
1718
1719 keyword arguments:
1719 keyword arguments:
1720 heads: list of revs to clone (forces use of pull)
1720 heads: list of revs to clone (forces use of pull)
1721 stream: use streaming clone if possible'''
1721 stream: use streaming clone if possible'''
1722
1722
1723 # now, all clients that can request uncompressed clones can
1723 # now, all clients that can request uncompressed clones can
1724 # read repo formats supported by all servers that can serve
1724 # read repo formats supported by all servers that can serve
1725 # them.
1725 # them.
1726
1726
1727 # if revlog format changes, client will have to check version
1727 # if revlog format changes, client will have to check version
1728 # and format flags on "stream" capability, and use
1728 # and format flags on "stream" capability, and use
1729 # uncompressed only if compatible.
1729 # uncompressed only if compatible.
1730
1730
1731 if stream and not heads and remote.capable('stream'):
1731 if stream and not heads and remote.capable('stream'):
1732 return self.stream_in(remote)
1732 return self.stream_in(remote)
1733 return self.pull(remote, heads)
1733 return self.pull(remote, heads)
1734
1734
1735 # used to avoid circular references so destructors work
1735 # used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that renames journal files to undo files.

    A plain closure over base is returned (rather than a bound method)
    to avoid circular references so destructors work.
    """
    def renamer():
        util.rename(os.path.join(base, "journal"),
                    os.path.join(base, "undo"))
        util.rename(os.path.join(base, "journal.dirstate"),
                    os.path.join(base, "undo.dirstate"))
    return renamer
1743
1743
def instance(ui, path, create):
    """Open (or create) the local repository addressed by path.

    Any leading 'file:' scheme is stripped before the path is handed
    to localrepository.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1746
1746
def islocal(path):
    """Local repositories are always directly accessible."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now