##// END OF EJS Templates
Make lookup aware of branch labels...
Matt Mackall -
r3418:5436c8fe default
parent child Browse files
Show More
@@ -1,1802 +1,1803 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ()
18 capabilities = ()
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("There is no Mercurial repository"
30 raise repo.RepoError(_("There is no Mercurial repository"
31 " here (.hg not found)"))
31 " here (.hg not found)"))
32 path = p
32 path = p
33 self.path = os.path.join(path, ".hg")
33 self.path = os.path.join(path, ".hg")
34
34
35 if not os.path.isdir(self.path):
35 if not os.path.isdir(self.path):
36 if create:
36 if create:
37 if not os.path.exists(path):
37 if not os.path.exists(path):
38 os.mkdir(path)
38 os.mkdir(path)
39 os.mkdir(self.path)
39 os.mkdir(self.path)
40 os.mkdir(self.join("data"))
40 os.mkdir(self.join("data"))
41 else:
41 else:
42 raise repo.RepoError(_("repository %s not found") % path)
42 raise repo.RepoError(_("repository %s not found") % path)
43 elif create:
43 elif create:
44 raise repo.RepoError(_("repository %s already exists") % path)
44 raise repo.RepoError(_("repository %s already exists") % path)
45
45
46 self.root = os.path.abspath(path)
46 self.root = os.path.abspath(path)
47 self.origroot = path
47 self.origroot = path
48 self.ui = ui.ui(parentui=parentui)
48 self.ui = ui.ui(parentui=parentui)
49 self.opener = util.opener(self.path)
49 self.opener = util.opener(self.path)
50 self.wopener = util.opener(self.root)
50 self.wopener = util.opener(self.root)
51
51
52 try:
52 try:
53 self.ui.readconfig(self.join("hgrc"), self.root)
53 self.ui.readconfig(self.join("hgrc"), self.root)
54 except IOError:
54 except IOError:
55 pass
55 pass
56
56
57 v = self.ui.configrevlog()
57 v = self.ui.configrevlog()
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 fl = v.get('flags', None)
60 fl = v.get('flags', None)
61 flags = 0
61 flags = 0
62 if fl != None:
62 if fl != None:
63 for x in fl.split():
63 for x in fl.split():
64 flags |= revlog.flagstr(x)
64 flags |= revlog.flagstr(x)
65 elif self.revlogv1:
65 elif self.revlogv1:
66 flags = revlog.REVLOG_DEFAULT_FLAGS
66 flags = revlog.REVLOG_DEFAULT_FLAGS
67
67
68 v = self.revlogversion | flags
68 v = self.revlogversion | flags
69 self.manifest = manifest.manifest(self.opener, v)
69 self.manifest = manifest.manifest(self.opener, v)
70 self.changelog = changelog.changelog(self.opener, v)
70 self.changelog = changelog.changelog(self.opener, v)
71
71
72 # the changelog might not have the inline index flag
72 # the changelog might not have the inline index flag
73 # on. If the format of the changelog is the same as found in
73 # on. If the format of the changelog is the same as found in
74 # .hgrc, apply any flags found in the .hgrc as well.
74 # .hgrc, apply any flags found in the .hgrc as well.
75 # Otherwise, just version from the changelog
75 # Otherwise, just version from the changelog
76 v = self.changelog.version
76 v = self.changelog.version
77 if v == self.revlogversion:
77 if v == self.revlogversion:
78 v |= flags
78 v |= flags
79 self.revlogversion = v
79 self.revlogversion = v
80
80
81 self.tagscache = None
81 self.tagscache = None
82 self.branchcache = None
82 self.branchcache = None
83 self.nodetagscache = None
83 self.nodetagscache = None
84 self.encodepats = None
84 self.encodepats = None
85 self.decodepats = None
85 self.decodepats = None
86 self.transhandle = None
86 self.transhandle = None
87
87
88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89
89
90 def url(self):
90 def url(self):
91 return 'file:' + self.root
91 return 'file:' + self.root
92
92
93 def hook(self, name, throw=False, **args):
93 def hook(self, name, throw=False, **args):
94 def callhook(hname, funcname):
94 def callhook(hname, funcname):
95 '''call python hook. hook is callable object, looked up as
95 '''call python hook. hook is callable object, looked up as
96 name in python module. if callable returns "true", hook
96 name in python module. if callable returns "true", hook
97 fails, else passes. if hook raises exception, treated as
97 fails, else passes. if hook raises exception, treated as
98 hook failure. exception propagates if throw is "true".
98 hook failure. exception propagates if throw is "true".
99
99
100 reason for "true" meaning "hook failed" is so that
100 reason for "true" meaning "hook failed" is so that
101 unmodified commands (e.g. mercurial.commands.update) can
101 unmodified commands (e.g. mercurial.commands.update) can
102 be run as hooks without wrappers to convert return values.'''
102 be run as hooks without wrappers to convert return values.'''
103
103
104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 d = funcname.rfind('.')
105 d = funcname.rfind('.')
106 if d == -1:
106 if d == -1:
107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 % (hname, funcname))
108 % (hname, funcname))
109 modname = funcname[:d]
109 modname = funcname[:d]
110 try:
110 try:
111 obj = __import__(modname)
111 obj = __import__(modname)
112 except ImportError:
112 except ImportError:
113 try:
113 try:
114 # extensions are loaded with hgext_ prefix
114 # extensions are loaded with hgext_ prefix
115 obj = __import__("hgext_%s" % modname)
115 obj = __import__("hgext_%s" % modname)
116 except ImportError:
116 except ImportError:
117 raise util.Abort(_('%s hook is invalid '
117 raise util.Abort(_('%s hook is invalid '
118 '(import of "%s" failed)') %
118 '(import of "%s" failed)') %
119 (hname, modname))
119 (hname, modname))
120 try:
120 try:
121 for p in funcname.split('.')[1:]:
121 for p in funcname.split('.')[1:]:
122 obj = getattr(obj, p)
122 obj = getattr(obj, p)
123 except AttributeError, err:
123 except AttributeError, err:
124 raise util.Abort(_('%s hook is invalid '
124 raise util.Abort(_('%s hook is invalid '
125 '("%s" is not defined)') %
125 '("%s" is not defined)') %
126 (hname, funcname))
126 (hname, funcname))
127 if not callable(obj):
127 if not callable(obj):
128 raise util.Abort(_('%s hook is invalid '
128 raise util.Abort(_('%s hook is invalid '
129 '("%s" is not callable)') %
129 '("%s" is not callable)') %
130 (hname, funcname))
130 (hname, funcname))
131 try:
131 try:
132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 except (KeyboardInterrupt, util.SignalInterrupt):
133 except (KeyboardInterrupt, util.SignalInterrupt):
134 raise
134 raise
135 except Exception, exc:
135 except Exception, exc:
136 if isinstance(exc, util.Abort):
136 if isinstance(exc, util.Abort):
137 self.ui.warn(_('error: %s hook failed: %s\n') %
137 self.ui.warn(_('error: %s hook failed: %s\n') %
138 (hname, exc.args[0]))
138 (hname, exc.args[0]))
139 else:
139 else:
140 self.ui.warn(_('error: %s hook raised an exception: '
140 self.ui.warn(_('error: %s hook raised an exception: '
141 '%s\n') % (hname, exc))
141 '%s\n') % (hname, exc))
142 if throw:
142 if throw:
143 raise
143 raise
144 self.ui.print_exc()
144 self.ui.print_exc()
145 return True
145 return True
146 if r:
146 if r:
147 if throw:
147 if throw:
148 raise util.Abort(_('%s hook failed') % hname)
148 raise util.Abort(_('%s hook failed') % hname)
149 self.ui.warn(_('warning: %s hook failed\n') % hname)
149 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 return r
150 return r
151
151
152 def runhook(name, cmd):
152 def runhook(name, cmd):
153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 r = util.system(cmd, environ=env, cwd=self.root)
155 r = util.system(cmd, environ=env, cwd=self.root)
156 if r:
156 if r:
157 desc, r = util.explain_exit(r)
157 desc, r = util.explain_exit(r)
158 if throw:
158 if throw:
159 raise util.Abort(_('%s hook %s') % (name, desc))
159 raise util.Abort(_('%s hook %s') % (name, desc))
160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 return r
161 return r
162
162
163 r = False
163 r = False
164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 if hname.split(".", 1)[0] == name and cmd]
165 if hname.split(".", 1)[0] == name and cmd]
166 hooks.sort()
166 hooks.sort()
167 for hname, cmd in hooks:
167 for hname, cmd in hooks:
168 if cmd.startswith('python:'):
168 if cmd.startswith('python:'):
169 r = callhook(hname, cmd[7:].strip()) or r
169 r = callhook(hname, cmd[7:].strip()) or r
170 else:
170 else:
171 r = runhook(hname, cmd) or r
171 r = runhook(hname, cmd) or r
172 return r
172 return r
173
173
174 tag_disallowed = ':\r\n'
174 tag_disallowed = ':\r\n'
175
175
176 def tag(self, name, node, message, local, user, date):
176 def tag(self, name, node, message, local, user, date):
177 '''tag a revision with a symbolic name.
177 '''tag a revision with a symbolic name.
178
178
179 if local is True, the tag is stored in a per-repository file.
179 if local is True, the tag is stored in a per-repository file.
180 otherwise, it is stored in the .hgtags file, and a new
180 otherwise, it is stored in the .hgtags file, and a new
181 changeset is committed with the change.
181 changeset is committed with the change.
182
182
183 keyword arguments:
183 keyword arguments:
184
184
185 local: whether to store tag in non-version-controlled file
185 local: whether to store tag in non-version-controlled file
186 (default False)
186 (default False)
187
187
188 message: commit message to use if committing
188 message: commit message to use if committing
189
189
190 user: name of user to use if committing
190 user: name of user to use if committing
191
191
192 date: date tuple to use if committing'''
192 date: date tuple to use if committing'''
193
193
194 for c in self.tag_disallowed:
194 for c in self.tag_disallowed:
195 if c in name:
195 if c in name:
196 raise util.Abort(_('%r cannot be used in a tag name') % c)
196 raise util.Abort(_('%r cannot be used in a tag name') % c)
197
197
198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199
199
200 if local:
200 if local:
201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
202 self.hook('tag', node=hex(node), tag=name, local=local)
202 self.hook('tag', node=hex(node), tag=name, local=local)
203 return
203 return
204
204
205 for x in self.status()[:5]:
205 for x in self.status()[:5]:
206 if '.hgtags' in x:
206 if '.hgtags' in x:
207 raise util.Abort(_('working copy of .hgtags is changed '
207 raise util.Abort(_('working copy of .hgtags is changed '
208 '(please commit .hgtags manually)'))
208 '(please commit .hgtags manually)'))
209
209
210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
211 if self.dirstate.state('.hgtags') == '?':
211 if self.dirstate.state('.hgtags') == '?':
212 self.add(['.hgtags'])
212 self.add(['.hgtags'])
213
213
214 self.commit(['.hgtags'], message, user, date)
214 self.commit(['.hgtags'], message, user, date)
215 self.hook('tag', node=hex(node), tag=name, local=local)
215 self.hook('tag', node=hex(node), tag=name, local=local)
216
216
217 def tags(self):
217 def tags(self):
218 '''return a mapping of tag to node'''
218 '''return a mapping of tag to node'''
219 if not self.tagscache:
219 if not self.tagscache:
220 self.tagscache = {}
220 self.tagscache = {}
221
221
222 def parsetag(line, context):
222 def parsetag(line, context):
223 if not line:
223 if not line:
224 return
224 return
225 s = l.split(" ", 1)
225 s = l.split(" ", 1)
226 if len(s) != 2:
226 if len(s) != 2:
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 return
228 return
229 node, key = s
229 node, key = s
230 key = key.strip()
230 key = key.strip()
231 try:
231 try:
232 bin_n = bin(node)
232 bin_n = bin(node)
233 except TypeError:
233 except TypeError:
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 (context, node))
235 (context, node))
236 return
236 return
237 if bin_n not in self.changelog.nodemap:
237 if bin_n not in self.changelog.nodemap:
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 (context, key))
239 (context, key))
240 return
240 return
241 self.tagscache[key] = bin_n
241 self.tagscache[key] = bin_n
242
242
243 # read the tags file from each head, ending with the tip,
243 # read the tags file from each head, ending with the tip,
244 # and add each tag found to the map, with "newer" ones
244 # and add each tag found to the map, with "newer" ones
245 # taking precedence
245 # taking precedence
246 heads = self.heads()
246 heads = self.heads()
247 heads.reverse()
247 heads.reverse()
248 fl = self.file(".hgtags")
248 fl = self.file(".hgtags")
249 for node in heads:
249 for node in heads:
250 change = self.changelog.read(node)
250 change = self.changelog.read(node)
251 rev = self.changelog.rev(node)
251 rev = self.changelog.rev(node)
252 fn, ff = self.manifest.find(change[0], '.hgtags')
252 fn, ff = self.manifest.find(change[0], '.hgtags')
253 if fn is None: continue
253 if fn is None: continue
254 count = 0
254 count = 0
255 for l in fl.read(fn).splitlines():
255 for l in fl.read(fn).splitlines():
256 count += 1
256 count += 1
257 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
258 (rev, short(node), count))
258 (rev, short(node), count))
259 try:
259 try:
260 f = self.opener("localtags")
260 f = self.opener("localtags")
261 count = 0
261 count = 0
262 for l in f:
262 for l in f:
263 count += 1
263 count += 1
264 parsetag(l, _("localtags, line %d") % count)
264 parsetag(l, _("localtags, line %d") % count)
265 except IOError:
265 except IOError:
266 pass
266 pass
267
267
268 self.tagscache['tip'] = self.changelog.tip()
268 self.tagscache['tip'] = self.changelog.tip()
269
269
270 return self.tagscache
270 return self.tagscache
271
271
272 def tagslist(self):
272 def tagslist(self):
273 '''return a list of tags ordered by revision'''
273 '''return a list of tags ordered by revision'''
274 l = []
274 l = []
275 for t, n in self.tags().items():
275 for t, n in self.tags().items():
276 try:
276 try:
277 r = self.changelog.rev(n)
277 r = self.changelog.rev(n)
278 except:
278 except:
279 r = -2 # sort to the beginning of the list if unknown
279 r = -2 # sort to the beginning of the list if unknown
280 l.append((r, t, n))
280 l.append((r, t, n))
281 l.sort()
281 l.sort()
282 return [(t, n) for r, t, n in l]
282 return [(t, n) for r, t, n in l]
283
283
284 def nodetags(self, node):
284 def nodetags(self, node):
285 '''return the tags associated with a node'''
285 '''return the tags associated with a node'''
286 if not self.nodetagscache:
286 if not self.nodetagscache:
287 self.nodetagscache = {}
287 self.nodetagscache = {}
288 for t, n in self.tags().items():
288 for t, n in self.tags().items():
289 self.nodetagscache.setdefault(n, []).append(t)
289 self.nodetagscache.setdefault(n, []).append(t)
290 return self.nodetagscache.get(node, [])
290 return self.nodetagscache.get(node, [])
291
291
292 def branchtags(self):
292 def branchtags(self):
293 if self.branchcache != None:
293 if self.branchcache != None:
294 return self.branchcache
294 return self.branchcache
295
295
296 self.branchcache = {}
296 self.branchcache = {}
297
297
298 try:
298 try:
299 f = self.opener("branches.cache")
299 f = self.opener("branches.cache")
300 last, lrev = f.readline().rstrip().split(" ", 1)
300 last, lrev = f.readline().rstrip().split(" ", 1)
301 last, lrev = bin(last), int(lrev)
301 last, lrev = bin(last), int(lrev)
302 if self.changelog.node(lrev) == last: # sanity check
302 if self.changelog.node(lrev) == last: # sanity check
303 for l in f:
303 for l in f:
304 node, label = l.rstrip().split(" ", 1)
304 node, label = l.rstrip().split(" ", 1)
305 self.branchcache[label] = bin(node)
305 self.branchcache[label] = bin(node)
306 f.close()
306 f.close()
307 except IOError:
307 except IOError:
308 last, lrev = nullid, -1
308 last, lrev = nullid, -1
309 lrev = self.changelog.rev(last)
309 lrev = self.changelog.rev(last)
310
310
311 tip = self.changelog.count() - 1
311 tip = self.changelog.count() - 1
312 if lrev != tip:
312 if lrev != tip:
313 for r in range(lrev + 1, tip + 1):
313 for r in range(lrev + 1, tip + 1):
314 n = self.changelog.node(r)
314 n = self.changelog.node(r)
315 c = self.changelog.read(n)
315 c = self.changelog.read(n)
316 b = c[5].get("branch")
316 b = c[5].get("branch")
317 if b:
317 if b:
318 self.branchcache[b] = n
318 self.branchcache[b] = n
319 self._writebranchcache()
319 self._writebranchcache()
320
320
321 return self.branchcache
321 return self.branchcache
322
322
323 def _writebranchcache(self):
323 def _writebranchcache(self):
324 f = self.opener("branches.cache", "w")
324 f = self.opener("branches.cache", "w")
325 t = self.changelog.tip()
325 t = self.changelog.tip()
326 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
326 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
327 for label, node in self.branchcache.iteritems():
327 for label, node in self.branchcache.iteritems():
328 f.write("%s %s\n" % (hex(node), label))
328 f.write("%s %s\n" % (hex(node), label))
329
329
330 def lookup(self, key):
330 def lookup(self, key):
331 try:
331 if key == '.':
332 key = self.dirstate.parents()[0]
333 if key == nullid:
334 raise repo.RepoError(_("no revision checked out"))
335 if key in self.tags():
332 return self.tags()[key]
336 return self.tags()[key]
333 except KeyError:
337 if key in self.branchtags():
334 if key == '.':
338 return self.branchtags()[key]
335 key = self.dirstate.parents()[0]
339 try:
336 if key == nullid:
340 return self.changelog.lookup(key)
337 raise repo.RepoError(_("no revision checked out"))
341 except:
338 try:
342 raise repo.RepoError(_("unknown revision '%s'") % key)
339 return self.changelog.lookup(key)
340 except:
341 raise repo.RepoError(_("unknown revision '%s'") % key)
342
343
343 def dev(self):
344 def dev(self):
344 return os.lstat(self.path).st_dev
345 return os.lstat(self.path).st_dev
345
346
346 def local(self):
347 def local(self):
347 return True
348 return True
348
349
349 def join(self, f):
350 def join(self, f):
350 return os.path.join(self.path, f)
351 return os.path.join(self.path, f)
351
352
352 def wjoin(self, f):
353 def wjoin(self, f):
353 return os.path.join(self.root, f)
354 return os.path.join(self.root, f)
354
355
355 def file(self, f):
356 def file(self, f):
356 if f[0] == '/':
357 if f[0] == '/':
357 f = f[1:]
358 f = f[1:]
358 return filelog.filelog(self.opener, f, self.revlogversion)
359 return filelog.filelog(self.opener, f, self.revlogversion)
359
360
360 def changectx(self, changeid=None):
361 def changectx(self, changeid=None):
361 return context.changectx(self, changeid)
362 return context.changectx(self, changeid)
362
363
363 def workingctx(self):
364 def workingctx(self):
364 return context.workingctx(self)
365 return context.workingctx(self)
365
366
366 def parents(self, changeid=None):
367 def parents(self, changeid=None):
367 '''
368 '''
368 get list of changectxs for parents of changeid or working directory
369 get list of changectxs for parents of changeid or working directory
369 '''
370 '''
370 if changeid is None:
371 if changeid is None:
371 pl = self.dirstate.parents()
372 pl = self.dirstate.parents()
372 else:
373 else:
373 n = self.changelog.lookup(changeid)
374 n = self.changelog.lookup(changeid)
374 pl = self.changelog.parents(n)
375 pl = self.changelog.parents(n)
375 if pl[1] == nullid:
376 if pl[1] == nullid:
376 return [self.changectx(pl[0])]
377 return [self.changectx(pl[0])]
377 return [self.changectx(pl[0]), self.changectx(pl[1])]
378 return [self.changectx(pl[0]), self.changectx(pl[1])]
378
379
379 def filectx(self, path, changeid=None, fileid=None):
380 def filectx(self, path, changeid=None, fileid=None):
380 """changeid can be a changeset revision, node, or tag.
381 """changeid can be a changeset revision, node, or tag.
381 fileid can be a file revision or node."""
382 fileid can be a file revision or node."""
382 return context.filectx(self, path, changeid, fileid)
383 return context.filectx(self, path, changeid, fileid)
383
384
384 def getcwd(self):
385 def getcwd(self):
385 return self.dirstate.getcwd()
386 return self.dirstate.getcwd()
386
387
387 def wfile(self, f, mode='r'):
388 def wfile(self, f, mode='r'):
388 return self.wopener(f, mode)
389 return self.wopener(f, mode)
389
390
390 def wread(self, filename):
391 def wread(self, filename):
391 if self.encodepats == None:
392 if self.encodepats == None:
392 l = []
393 l = []
393 for pat, cmd in self.ui.configitems("encode"):
394 for pat, cmd in self.ui.configitems("encode"):
394 mf = util.matcher(self.root, "", [pat], [], [])[1]
395 mf = util.matcher(self.root, "", [pat], [], [])[1]
395 l.append((mf, cmd))
396 l.append((mf, cmd))
396 self.encodepats = l
397 self.encodepats = l
397
398
398 data = self.wopener(filename, 'r').read()
399 data = self.wopener(filename, 'r').read()
399
400
400 for mf, cmd in self.encodepats:
401 for mf, cmd in self.encodepats:
401 if mf(filename):
402 if mf(filename):
402 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
403 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
403 data = util.filter(data, cmd)
404 data = util.filter(data, cmd)
404 break
405 break
405
406
406 return data
407 return data
407
408
408 def wwrite(self, filename, data, fd=None):
409 def wwrite(self, filename, data, fd=None):
409 if self.decodepats == None:
410 if self.decodepats == None:
410 l = []
411 l = []
411 for pat, cmd in self.ui.configitems("decode"):
412 for pat, cmd in self.ui.configitems("decode"):
412 mf = util.matcher(self.root, "", [pat], [], [])[1]
413 mf = util.matcher(self.root, "", [pat], [], [])[1]
413 l.append((mf, cmd))
414 l.append((mf, cmd))
414 self.decodepats = l
415 self.decodepats = l
415
416
416 for mf, cmd in self.decodepats:
417 for mf, cmd in self.decodepats:
417 if mf(filename):
418 if mf(filename):
418 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
419 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
419 data = util.filter(data, cmd)
420 data = util.filter(data, cmd)
420 break
421 break
421
422
422 if fd:
423 if fd:
423 return fd.write(data)
424 return fd.write(data)
424 return self.wopener(filename, 'w').write(data)
425 return self.wopener(filename, 'w').write(data)
425
426
426 def transaction(self):
427 def transaction(self):
427 tr = self.transhandle
428 tr = self.transhandle
428 if tr != None and tr.running():
429 if tr != None and tr.running():
429 return tr.nest()
430 return tr.nest()
430
431
431 # save dirstate for rollback
432 # save dirstate for rollback
432 try:
433 try:
433 ds = self.opener("dirstate").read()
434 ds = self.opener("dirstate").read()
434 except IOError:
435 except IOError:
435 ds = ""
436 ds = ""
436 self.opener("journal.dirstate", "w").write(ds)
437 self.opener("journal.dirstate", "w").write(ds)
437
438
438 tr = transaction.transaction(self.ui.warn, self.opener,
439 tr = transaction.transaction(self.ui.warn, self.opener,
439 self.join("journal"),
440 self.join("journal"),
440 aftertrans(self.path))
441 aftertrans(self.path))
441 self.transhandle = tr
442 self.transhandle = tr
442 return tr
443 return tr
443
444
444 def recover(self):
445 def recover(self):
445 l = self.lock()
446 l = self.lock()
446 if os.path.exists(self.join("journal")):
447 if os.path.exists(self.join("journal")):
447 self.ui.status(_("rolling back interrupted transaction\n"))
448 self.ui.status(_("rolling back interrupted transaction\n"))
448 transaction.rollback(self.opener, self.join("journal"))
449 transaction.rollback(self.opener, self.join("journal"))
449 self.reload()
450 self.reload()
450 return True
451 return True
451 else:
452 else:
452 self.ui.warn(_("no interrupted transaction available\n"))
453 self.ui.warn(_("no interrupted transaction available\n"))
453 return False
454 return False
454
455
455 def rollback(self, wlock=None):
456 def rollback(self, wlock=None):
456 if not wlock:
457 if not wlock:
457 wlock = self.wlock()
458 wlock = self.wlock()
458 l = self.lock()
459 l = self.lock()
459 if os.path.exists(self.join("undo")):
460 if os.path.exists(self.join("undo")):
460 self.ui.status(_("rolling back last transaction\n"))
461 self.ui.status(_("rolling back last transaction\n"))
461 transaction.rollback(self.opener, self.join("undo"))
462 transaction.rollback(self.opener, self.join("undo"))
462 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
463 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
463 self.reload()
464 self.reload()
464 self.wreload()
465 self.wreload()
465 else:
466 else:
466 self.ui.warn(_("no rollback information available\n"))
467 self.ui.warn(_("no rollback information available\n"))
467
468
468 def wreload(self):
469 def wreload(self):
469 self.dirstate.read()
470 self.dirstate.read()
470
471
471 def reload(self):
472 def reload(self):
472 self.changelog.load()
473 self.changelog.load()
473 self.manifest.load()
474 self.manifest.load()
474 self.tagscache = None
475 self.tagscache = None
475 self.nodetagscache = None
476 self.nodetagscache = None
476
477
477 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
478 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
478 desc=None):
479 desc=None):
479 try:
480 try:
480 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
481 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
481 except lock.LockHeld, inst:
482 except lock.LockHeld, inst:
482 if not wait:
483 if not wait:
483 raise
484 raise
484 self.ui.warn(_("waiting for lock on %s held by %s\n") %
485 self.ui.warn(_("waiting for lock on %s held by %s\n") %
485 (desc, inst.args[0]))
486 (desc, inst.args[0]))
486 # default to 600 seconds timeout
487 # default to 600 seconds timeout
487 l = lock.lock(self.join(lockname),
488 l = lock.lock(self.join(lockname),
488 int(self.ui.config("ui", "timeout") or 600),
489 int(self.ui.config("ui", "timeout") or 600),
489 releasefn, desc=desc)
490 releasefn, desc=desc)
490 if acquirefn:
491 if acquirefn:
491 acquirefn()
492 acquirefn()
492 return l
493 return l
493
494
494 def lock(self, wait=1):
495 def lock(self, wait=1):
495 return self.do_lock("lock", wait, acquirefn=self.reload,
496 return self.do_lock("lock", wait, acquirefn=self.reload,
496 desc=_('repository %s') % self.origroot)
497 desc=_('repository %s') % self.origroot)
497
498
498 def wlock(self, wait=1):
499 def wlock(self, wait=1):
499 return self.do_lock("wlock", wait, self.dirstate.write,
500 return self.do_lock("wlock", wait, self.dirstate.write,
500 self.wreload,
501 self.wreload,
501 desc=_('working directory of %s') % self.origroot)
502 desc=_('working directory of %s') % self.origroot)
502
503
503 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
504 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
504 """
505 """
505 commit an individual file as part of a larger transaction
506 commit an individual file as part of a larger transaction
506 """
507 """
507
508
508 t = self.wread(fn)
509 t = self.wread(fn)
509 fl = self.file(fn)
510 fl = self.file(fn)
510 fp1 = manifest1.get(fn, nullid)
511 fp1 = manifest1.get(fn, nullid)
511 fp2 = manifest2.get(fn, nullid)
512 fp2 = manifest2.get(fn, nullid)
512
513
513 meta = {}
514 meta = {}
514 cp = self.dirstate.copied(fn)
515 cp = self.dirstate.copied(fn)
515 if cp:
516 if cp:
516 meta["copy"] = cp
517 meta["copy"] = cp
517 if not manifest2: # not a branch merge
518 if not manifest2: # not a branch merge
518 meta["copyrev"] = hex(manifest1.get(cp, nullid))
519 meta["copyrev"] = hex(manifest1.get(cp, nullid))
519 fp2 = nullid
520 fp2 = nullid
520 elif fp2 != nullid: # copied on remote side
521 elif fp2 != nullid: # copied on remote side
521 meta["copyrev"] = hex(manifest1.get(cp, nullid))
522 meta["copyrev"] = hex(manifest1.get(cp, nullid))
522 else: # copied on local side, reversed
523 else: # copied on local side, reversed
523 meta["copyrev"] = hex(manifest2.get(cp))
524 meta["copyrev"] = hex(manifest2.get(cp))
524 fp2 = nullid
525 fp2 = nullid
525 self.ui.debug(_(" %s: copy %s:%s\n") %
526 self.ui.debug(_(" %s: copy %s:%s\n") %
526 (fn, cp, meta["copyrev"]))
527 (fn, cp, meta["copyrev"]))
527 fp1 = nullid
528 fp1 = nullid
528 elif fp2 != nullid:
529 elif fp2 != nullid:
529 # is one parent an ancestor of the other?
530 # is one parent an ancestor of the other?
530 fpa = fl.ancestor(fp1, fp2)
531 fpa = fl.ancestor(fp1, fp2)
531 if fpa == fp1:
532 if fpa == fp1:
532 fp1, fp2 = fp2, nullid
533 fp1, fp2 = fp2, nullid
533 elif fpa == fp2:
534 elif fpa == fp2:
534 fp2 = nullid
535 fp2 = nullid
535
536
536 # is the file unmodified from the parent? report existing entry
537 # is the file unmodified from the parent? report existing entry
537 if fp2 == nullid and not fl.cmp(fp1, t):
538 if fp2 == nullid and not fl.cmp(fp1, t):
538 return fp1
539 return fp1
539
540
540 changelist.append(fn)
541 changelist.append(fn)
541 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
542 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
542
543
543 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
544 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
544 orig_parent = self.dirstate.parents()[0] or nullid
545 orig_parent = self.dirstate.parents()[0] or nullid
545 p1 = p1 or self.dirstate.parents()[0] or nullid
546 p1 = p1 or self.dirstate.parents()[0] or nullid
546 p2 = p2 or self.dirstate.parents()[1] or nullid
547 p2 = p2 or self.dirstate.parents()[1] or nullid
547 c1 = self.changelog.read(p1)
548 c1 = self.changelog.read(p1)
548 c2 = self.changelog.read(p2)
549 c2 = self.changelog.read(p2)
549 m1 = self.manifest.read(c1[0]).copy()
550 m1 = self.manifest.read(c1[0]).copy()
550 m2 = self.manifest.read(c2[0])
551 m2 = self.manifest.read(c2[0])
551 changed = []
552 changed = []
552 removed = []
553 removed = []
553
554
554 if orig_parent == p1:
555 if orig_parent == p1:
555 update_dirstate = 1
556 update_dirstate = 1
556 else:
557 else:
557 update_dirstate = 0
558 update_dirstate = 0
558
559
559 if not wlock:
560 if not wlock:
560 wlock = self.wlock()
561 wlock = self.wlock()
561 l = self.lock()
562 l = self.lock()
562 tr = self.transaction()
563 tr = self.transaction()
563 linkrev = self.changelog.count()
564 linkrev = self.changelog.count()
564 for f in files:
565 for f in files:
565 try:
566 try:
566 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
567 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
567 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
568 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
568 except IOError:
569 except IOError:
569 try:
570 try:
570 del m1[f]
571 del m1[f]
571 if update_dirstate:
572 if update_dirstate:
572 self.dirstate.forget([f])
573 self.dirstate.forget([f])
573 removed.append(f)
574 removed.append(f)
574 except:
575 except:
575 # deleted from p2?
576 # deleted from p2?
576 pass
577 pass
577
578
578 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
579 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
579 user = user or self.ui.username()
580 user = user or self.ui.username()
580 n = self.changelog.add(mnode, changed + removed, text,
581 n = self.changelog.add(mnode, changed + removed, text,
581 tr, p1, p2, user, date)
582 tr, p1, p2, user, date)
582 tr.close()
583 tr.close()
583 if update_dirstate:
584 if update_dirstate:
584 self.dirstate.setparents(n, nullid)
585 self.dirstate.setparents(n, nullid)
585
586
586 def commit(self, files=None, text="", user=None, date=None,
587 def commit(self, files=None, text="", user=None, date=None,
587 match=util.always, force=False, lock=None, wlock=None,
588 match=util.always, force=False, lock=None, wlock=None,
588 force_editor=False):
589 force_editor=False):
589 commit = []
590 commit = []
590 remove = []
591 remove = []
591 changed = []
592 changed = []
592
593
593 if files:
594 if files:
594 for f in files:
595 for f in files:
595 s = self.dirstate.state(f)
596 s = self.dirstate.state(f)
596 if s in 'nmai':
597 if s in 'nmai':
597 commit.append(f)
598 commit.append(f)
598 elif s == 'r':
599 elif s == 'r':
599 remove.append(f)
600 remove.append(f)
600 else:
601 else:
601 self.ui.warn(_("%s not tracked!\n") % f)
602 self.ui.warn(_("%s not tracked!\n") % f)
602 else:
603 else:
603 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
604 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
604 commit = modified + added
605 commit = modified + added
605 remove = removed
606 remove = removed
606
607
607 p1, p2 = self.dirstate.parents()
608 p1, p2 = self.dirstate.parents()
608 c1 = self.changelog.read(p1)
609 c1 = self.changelog.read(p1)
609 c2 = self.changelog.read(p2)
610 c2 = self.changelog.read(p2)
610 m1 = self.manifest.read(c1[0]).copy()
611 m1 = self.manifest.read(c1[0]).copy()
611 m2 = self.manifest.read(c2[0])
612 m2 = self.manifest.read(c2[0])
612
613
613 if not commit and not remove and not force and p2 == nullid:
614 if not commit and not remove and not force and p2 == nullid:
614 self.ui.status(_("nothing changed\n"))
615 self.ui.status(_("nothing changed\n"))
615 return None
616 return None
616
617
617 xp1 = hex(p1)
618 xp1 = hex(p1)
618 if p2 == nullid: xp2 = ''
619 if p2 == nullid: xp2 = ''
619 else: xp2 = hex(p2)
620 else: xp2 = hex(p2)
620
621
621 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
622 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
622
623
623 if not wlock:
624 if not wlock:
624 wlock = self.wlock()
625 wlock = self.wlock()
625 if not lock:
626 if not lock:
626 lock = self.lock()
627 lock = self.lock()
627 tr = self.transaction()
628 tr = self.transaction()
628
629
629 # check in files
630 # check in files
630 new = {}
631 new = {}
631 linkrev = self.changelog.count()
632 linkrev = self.changelog.count()
632 commit.sort()
633 commit.sort()
633 for f in commit:
634 for f in commit:
634 self.ui.note(f + "\n")
635 self.ui.note(f + "\n")
635 try:
636 try:
636 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
637 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
637 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
638 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
638 except IOError:
639 except IOError:
639 self.ui.warn(_("trouble committing %s!\n") % f)
640 self.ui.warn(_("trouble committing %s!\n") % f)
640 raise
641 raise
641
642
642 # update manifest
643 # update manifest
643 m1.update(new)
644 m1.update(new)
644 for f in remove:
645 for f in remove:
645 if f in m1:
646 if f in m1:
646 del m1[f]
647 del m1[f]
647 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
648 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
648
649
649 # add changeset
650 # add changeset
650 new = new.keys()
651 new = new.keys()
651 new.sort()
652 new.sort()
652
653
653 user = user or self.ui.username()
654 user = user or self.ui.username()
654 if not text or force_editor:
655 if not text or force_editor:
655 edittext = []
656 edittext = []
656 if text:
657 if text:
657 edittext.append(text)
658 edittext.append(text)
658 edittext.append("")
659 edittext.append("")
659 if p2 != nullid:
660 if p2 != nullid:
660 edittext.append("HG: branch merge")
661 edittext.append("HG: branch merge")
661 edittext.extend(["HG: changed %s" % f for f in changed])
662 edittext.extend(["HG: changed %s" % f for f in changed])
662 edittext.extend(["HG: removed %s" % f for f in remove])
663 edittext.extend(["HG: removed %s" % f for f in remove])
663 if not changed and not remove:
664 if not changed and not remove:
664 edittext.append("HG: no files changed")
665 edittext.append("HG: no files changed")
665 edittext.append("")
666 edittext.append("")
666 # run editor in the repository root
667 # run editor in the repository root
667 olddir = os.getcwd()
668 olddir = os.getcwd()
668 os.chdir(self.root)
669 os.chdir(self.root)
669 text = self.ui.edit("\n".join(edittext), user)
670 text = self.ui.edit("\n".join(edittext), user)
670 os.chdir(olddir)
671 os.chdir(olddir)
671
672
672 lines = [line.rstrip() for line in text.rstrip().splitlines()]
673 lines = [line.rstrip() for line in text.rstrip().splitlines()]
673 while lines and not lines[0]:
674 while lines and not lines[0]:
674 del lines[0]
675 del lines[0]
675 if not lines:
676 if not lines:
676 return None
677 return None
677 text = '\n'.join(lines)
678 text = '\n'.join(lines)
678 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
679 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
679 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
680 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
680 parent2=xp2)
681 parent2=xp2)
681 tr.close()
682 tr.close()
682
683
683 self.dirstate.setparents(n)
684 self.dirstate.setparents(n)
684 self.dirstate.update(new, "n")
685 self.dirstate.update(new, "n")
685 self.dirstate.forget(remove)
686 self.dirstate.forget(remove)
686
687
687 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
688 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
688 return n
689 return n
689
690
690 def walk(self, node=None, files=[], match=util.always, badmatch=None):
691 def walk(self, node=None, files=[], match=util.always, badmatch=None):
691 if node:
692 if node:
692 fdict = dict.fromkeys(files)
693 fdict = dict.fromkeys(files)
693 for fn in self.manifest.read(self.changelog.read(node)[0]):
694 for fn in self.manifest.read(self.changelog.read(node)[0]):
694 for ffn in fdict:
695 for ffn in fdict:
695 # match if the file is the exact name or a directory
696 # match if the file is the exact name or a directory
696 if ffn == fn or fn.startswith("%s/" % ffn):
697 if ffn == fn or fn.startswith("%s/" % ffn):
697 del fdict[ffn]
698 del fdict[ffn]
698 break
699 break
699 if match(fn):
700 if match(fn):
700 yield 'm', fn
701 yield 'm', fn
701 for fn in fdict:
702 for fn in fdict:
702 if badmatch and badmatch(fn):
703 if badmatch and badmatch(fn):
703 if match(fn):
704 if match(fn):
704 yield 'b', fn
705 yield 'b', fn
705 else:
706 else:
706 self.ui.warn(_('%s: No such file in rev %s\n') % (
707 self.ui.warn(_('%s: No such file in rev %s\n') % (
707 util.pathto(self.getcwd(), fn), short(node)))
708 util.pathto(self.getcwd(), fn), short(node)))
708 else:
709 else:
709 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
710 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
710 yield src, fn
711 yield src, fn
711
712
712 def status(self, node1=None, node2=None, files=[], match=util.always,
713 def status(self, node1=None, node2=None, files=[], match=util.always,
713 wlock=None, list_ignored=False, list_clean=False):
714 wlock=None, list_ignored=False, list_clean=False):
714 """return status of files between two nodes or node and working directory
715 """return status of files between two nodes or node and working directory
715
716
716 If node1 is None, use the first dirstate parent instead.
717 If node1 is None, use the first dirstate parent instead.
717 If node2 is None, compare node1 with working directory.
718 If node2 is None, compare node1 with working directory.
718 """
719 """
719
720
720 def fcmp(fn, mf):
721 def fcmp(fn, mf):
721 t1 = self.wread(fn)
722 t1 = self.wread(fn)
722 return self.file(fn).cmp(mf.get(fn, nullid), t1)
723 return self.file(fn).cmp(mf.get(fn, nullid), t1)
723
724
724 def mfmatches(node):
725 def mfmatches(node):
725 change = self.changelog.read(node)
726 change = self.changelog.read(node)
726 mf = self.manifest.read(change[0]).copy()
727 mf = self.manifest.read(change[0]).copy()
727 for fn in mf.keys():
728 for fn in mf.keys():
728 if not match(fn):
729 if not match(fn):
729 del mf[fn]
730 del mf[fn]
730 return mf
731 return mf
731
732
732 modified, added, removed, deleted, unknown = [], [], [], [], []
733 modified, added, removed, deleted, unknown = [], [], [], [], []
733 ignored, clean = [], []
734 ignored, clean = [], []
734
735
735 compareworking = False
736 compareworking = False
736 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
737 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
737 compareworking = True
738 compareworking = True
738
739
739 if not compareworking:
740 if not compareworking:
740 # read the manifest from node1 before the manifest from node2,
741 # read the manifest from node1 before the manifest from node2,
741 # so that we'll hit the manifest cache if we're going through
742 # so that we'll hit the manifest cache if we're going through
742 # all the revisions in parent->child order.
743 # all the revisions in parent->child order.
743 mf1 = mfmatches(node1)
744 mf1 = mfmatches(node1)
744
745
745 # are we comparing the working directory?
746 # are we comparing the working directory?
746 if not node2:
747 if not node2:
747 if not wlock:
748 if not wlock:
748 try:
749 try:
749 wlock = self.wlock(wait=0)
750 wlock = self.wlock(wait=0)
750 except lock.LockException:
751 except lock.LockException:
751 wlock = None
752 wlock = None
752 (lookup, modified, added, removed, deleted, unknown,
753 (lookup, modified, added, removed, deleted, unknown,
753 ignored, clean) = self.dirstate.status(files, match,
754 ignored, clean) = self.dirstate.status(files, match,
754 list_ignored, list_clean)
755 list_ignored, list_clean)
755
756
756 # are we comparing working dir against its parent?
757 # are we comparing working dir against its parent?
757 if compareworking:
758 if compareworking:
758 if lookup:
759 if lookup:
759 # do a full compare of any files that might have changed
760 # do a full compare of any files that might have changed
760 mf2 = mfmatches(self.dirstate.parents()[0])
761 mf2 = mfmatches(self.dirstate.parents()[0])
761 for f in lookup:
762 for f in lookup:
762 if fcmp(f, mf2):
763 if fcmp(f, mf2):
763 modified.append(f)
764 modified.append(f)
764 else:
765 else:
765 clean.append(f)
766 clean.append(f)
766 if wlock is not None:
767 if wlock is not None:
767 self.dirstate.update([f], "n")
768 self.dirstate.update([f], "n")
768 else:
769 else:
769 # we are comparing working dir against non-parent
770 # we are comparing working dir against non-parent
770 # generate a pseudo-manifest for the working dir
771 # generate a pseudo-manifest for the working dir
771 # XXX: create it in dirstate.py ?
772 # XXX: create it in dirstate.py ?
772 mf2 = mfmatches(self.dirstate.parents()[0])
773 mf2 = mfmatches(self.dirstate.parents()[0])
773 for f in lookup + modified + added:
774 for f in lookup + modified + added:
774 mf2[f] = ""
775 mf2[f] = ""
775 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
776 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
776 for f in removed:
777 for f in removed:
777 if f in mf2:
778 if f in mf2:
778 del mf2[f]
779 del mf2[f]
779 else:
780 else:
780 # we are comparing two revisions
781 # we are comparing two revisions
781 mf2 = mfmatches(node2)
782 mf2 = mfmatches(node2)
782
783
783 if not compareworking:
784 if not compareworking:
784 # flush lists from dirstate before comparing manifests
785 # flush lists from dirstate before comparing manifests
785 modified, added, clean = [], [], []
786 modified, added, clean = [], [], []
786
787
787 # make sure to sort the files so we talk to the disk in a
788 # make sure to sort the files so we talk to the disk in a
788 # reasonable order
789 # reasonable order
789 mf2keys = mf2.keys()
790 mf2keys = mf2.keys()
790 mf2keys.sort()
791 mf2keys.sort()
791 for fn in mf2keys:
792 for fn in mf2keys:
792 if mf1.has_key(fn):
793 if mf1.has_key(fn):
793 if mf1.flags(fn) != mf2.flags(fn) or \
794 if mf1.flags(fn) != mf2.flags(fn) or \
794 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
795 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
795 modified.append(fn)
796 modified.append(fn)
796 elif list_clean:
797 elif list_clean:
797 clean.append(fn)
798 clean.append(fn)
798 del mf1[fn]
799 del mf1[fn]
799 else:
800 else:
800 added.append(fn)
801 added.append(fn)
801
802
802 removed = mf1.keys()
803 removed = mf1.keys()
803
804
804 # sort and return results:
805 # sort and return results:
805 for l in modified, added, removed, deleted, unknown, ignored, clean:
806 for l in modified, added, removed, deleted, unknown, ignored, clean:
806 l.sort()
807 l.sort()
807 return (modified, added, removed, deleted, unknown, ignored, clean)
808 return (modified, added, removed, deleted, unknown, ignored, clean)
808
809
809 def add(self, list, wlock=None):
810 def add(self, list, wlock=None):
810 if not wlock:
811 if not wlock:
811 wlock = self.wlock()
812 wlock = self.wlock()
812 for f in list:
813 for f in list:
813 p = self.wjoin(f)
814 p = self.wjoin(f)
814 if not os.path.exists(p):
815 if not os.path.exists(p):
815 self.ui.warn(_("%s does not exist!\n") % f)
816 self.ui.warn(_("%s does not exist!\n") % f)
816 elif not os.path.isfile(p):
817 elif not os.path.isfile(p):
817 self.ui.warn(_("%s not added: only files supported currently\n")
818 self.ui.warn(_("%s not added: only files supported currently\n")
818 % f)
819 % f)
819 elif self.dirstate.state(f) in 'an':
820 elif self.dirstate.state(f) in 'an':
820 self.ui.warn(_("%s already tracked!\n") % f)
821 self.ui.warn(_("%s already tracked!\n") % f)
821 else:
822 else:
822 self.dirstate.update([f], "a")
823 self.dirstate.update([f], "a")
823
824
824 def forget(self, list, wlock=None):
825 def forget(self, list, wlock=None):
825 if not wlock:
826 if not wlock:
826 wlock = self.wlock()
827 wlock = self.wlock()
827 for f in list:
828 for f in list:
828 if self.dirstate.state(f) not in 'ai':
829 if self.dirstate.state(f) not in 'ai':
829 self.ui.warn(_("%s not added!\n") % f)
830 self.ui.warn(_("%s not added!\n") % f)
830 else:
831 else:
831 self.dirstate.forget([f])
832 self.dirstate.forget([f])
832
833
833 def remove(self, list, unlink=False, wlock=None):
834 def remove(self, list, unlink=False, wlock=None):
834 if unlink:
835 if unlink:
835 for f in list:
836 for f in list:
836 try:
837 try:
837 util.unlink(self.wjoin(f))
838 util.unlink(self.wjoin(f))
838 except OSError, inst:
839 except OSError, inst:
839 if inst.errno != errno.ENOENT:
840 if inst.errno != errno.ENOENT:
840 raise
841 raise
841 if not wlock:
842 if not wlock:
842 wlock = self.wlock()
843 wlock = self.wlock()
843 for f in list:
844 for f in list:
844 p = self.wjoin(f)
845 p = self.wjoin(f)
845 if os.path.exists(p):
846 if os.path.exists(p):
846 self.ui.warn(_("%s still exists!\n") % f)
847 self.ui.warn(_("%s still exists!\n") % f)
847 elif self.dirstate.state(f) == 'a':
848 elif self.dirstate.state(f) == 'a':
848 self.dirstate.forget([f])
849 self.dirstate.forget([f])
849 elif f not in self.dirstate:
850 elif f not in self.dirstate:
850 self.ui.warn(_("%s not tracked!\n") % f)
851 self.ui.warn(_("%s not tracked!\n") % f)
851 else:
852 else:
852 self.dirstate.update([f], "r")
853 self.dirstate.update([f], "r")
853
854
854 def undelete(self, list, wlock=None):
855 def undelete(self, list, wlock=None):
855 p = self.dirstate.parents()[0]
856 p = self.dirstate.parents()[0]
856 mn = self.changelog.read(p)[0]
857 mn = self.changelog.read(p)[0]
857 m = self.manifest.read(mn)
858 m = self.manifest.read(mn)
858 if not wlock:
859 if not wlock:
859 wlock = self.wlock()
860 wlock = self.wlock()
860 for f in list:
861 for f in list:
861 if self.dirstate.state(f) not in "r":
862 if self.dirstate.state(f) not in "r":
862 self.ui.warn("%s not removed!\n" % f)
863 self.ui.warn("%s not removed!\n" % f)
863 else:
864 else:
864 t = self.file(f).read(m[f])
865 t = self.file(f).read(m[f])
865 self.wwrite(f, t)
866 self.wwrite(f, t)
866 util.set_exec(self.wjoin(f), m.execf(f))
867 util.set_exec(self.wjoin(f), m.execf(f))
867 self.dirstate.update([f], "n")
868 self.dirstate.update([f], "n")
868
869
869 def copy(self, source, dest, wlock=None):
870 def copy(self, source, dest, wlock=None):
870 p = self.wjoin(dest)
871 p = self.wjoin(dest)
871 if not os.path.exists(p):
872 if not os.path.exists(p):
872 self.ui.warn(_("%s does not exist!\n") % dest)
873 self.ui.warn(_("%s does not exist!\n") % dest)
873 elif not os.path.isfile(p):
874 elif not os.path.isfile(p):
874 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
875 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
875 else:
876 else:
876 if not wlock:
877 if not wlock:
877 wlock = self.wlock()
878 wlock = self.wlock()
878 if self.dirstate.state(dest) == '?':
879 if self.dirstate.state(dest) == '?':
879 self.dirstate.update([dest], "a")
880 self.dirstate.update([dest], "a")
880 self.dirstate.copy(source, dest)
881 self.dirstate.copy(source, dest)
881
882
882 def heads(self, start=None):
883 def heads(self, start=None):
883 heads = self.changelog.heads(start)
884 heads = self.changelog.heads(start)
884 # sort the output in rev descending order
885 # sort the output in rev descending order
885 heads = [(-self.changelog.rev(h), h) for h in heads]
886 heads = [(-self.changelog.rev(h), h) for h in heads]
886 heads.sort()
887 heads.sort()
887 return [n for (r, n) in heads]
888 return [n for (r, n) in heads]
888
889
889 # branchlookup returns a dict giving a list of branches for
890 # branchlookup returns a dict giving a list of branches for
890 # each head. A branch is defined as the tag of a node or
891 # each head. A branch is defined as the tag of a node or
891 # the branch of the node's parents. If a node has multiple
892 # the branch of the node's parents. If a node has multiple
892 # branch tags, tags are eliminated if they are visible from other
893 # branch tags, tags are eliminated if they are visible from other
893 # branch tags.
894 # branch tags.
894 #
895 #
895 # So, for this graph: a->b->c->d->e
896 # So, for this graph: a->b->c->d->e
896 # \ /
897 # \ /
897 # aa -----/
898 # aa -----/
898 # a has tag 2.6.12
899 # a has tag 2.6.12
899 # d has tag 2.6.13
900 # d has tag 2.6.13
900 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
901 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
901 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
902 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
902 # from the list.
903 # from the list.
903 #
904 #
904 # It is possible that more than one head will have the same branch tag.
905 # It is possible that more than one head will have the same branch tag.
905 # callers need to check the result for multiple heads under the same
906 # callers need to check the result for multiple heads under the same
906 # branch tag if that is a problem for them (ie checkout of a specific
907 # branch tag if that is a problem for them (ie checkout of a specific
907 # branch).
908 # branch).
908 #
909 #
909 # passing in a specific branch will limit the depth of the search
910 # passing in a specific branch will limit the depth of the search
910 # through the parents. It won't limit the branches returned in the
911 # through the parents. It won't limit the branches returned in the
911 # result though.
912 # result though.
912 def branchlookup(self, heads=None, branch=None):
913 def branchlookup(self, heads=None, branch=None):
913 if not heads:
914 if not heads:
914 heads = self.heads()
915 heads = self.heads()
915 headt = [ h for h in heads ]
916 headt = [ h for h in heads ]
916 chlog = self.changelog
917 chlog = self.changelog
917 branches = {}
918 branches = {}
918 merges = []
919 merges = []
919 seenmerge = {}
920 seenmerge = {}
920
921
921 # traverse the tree once for each head, recording in the branches
922 # traverse the tree once for each head, recording in the branches
922 # dict which tags are visible from this head. The branches
923 # dict which tags are visible from this head. The branches
923 # dict also records which tags are visible from each tag
924 # dict also records which tags are visible from each tag
924 # while we traverse.
925 # while we traverse.
925 while headt or merges:
926 while headt or merges:
926 if merges:
927 if merges:
927 n, found = merges.pop()
928 n, found = merges.pop()
928 visit = [n]
929 visit = [n]
929 else:
930 else:
930 h = headt.pop()
931 h = headt.pop()
931 visit = [h]
932 visit = [h]
932 found = [h]
933 found = [h]
933 seen = {}
934 seen = {}
934 while visit:
935 while visit:
935 n = visit.pop()
936 n = visit.pop()
936 if n in seen:
937 if n in seen:
937 continue
938 continue
938 pp = chlog.parents(n)
939 pp = chlog.parents(n)
939 tags = self.nodetags(n)
940 tags = self.nodetags(n)
940 if tags:
941 if tags:
941 for x in tags:
942 for x in tags:
942 if x == 'tip':
943 if x == 'tip':
943 continue
944 continue
944 for f in found:
945 for f in found:
945 branches.setdefault(f, {})[n] = 1
946 branches.setdefault(f, {})[n] = 1
946 branches.setdefault(n, {})[n] = 1
947 branches.setdefault(n, {})[n] = 1
947 break
948 break
948 if n not in found:
949 if n not in found:
949 found.append(n)
950 found.append(n)
950 if branch in tags:
951 if branch in tags:
951 continue
952 continue
952 seen[n] = 1
953 seen[n] = 1
953 if pp[1] != nullid and n not in seenmerge:
954 if pp[1] != nullid and n not in seenmerge:
954 merges.append((pp[1], [x for x in found]))
955 merges.append((pp[1], [x for x in found]))
955 seenmerge[n] = 1
956 seenmerge[n] = 1
956 if pp[0] != nullid:
957 if pp[0] != nullid:
957 visit.append(pp[0])
958 visit.append(pp[0])
958 # traverse the branches dict, eliminating branch tags from each
959 # traverse the branches dict, eliminating branch tags from each
959 # head that are visible from another branch tag for that head.
960 # head that are visible from another branch tag for that head.
960 out = {}
961 out = {}
961 viscache = {}
962 viscache = {}
962 for h in heads:
963 for h in heads:
963 def visible(node):
964 def visible(node):
964 if node in viscache:
965 if node in viscache:
965 return viscache[node]
966 return viscache[node]
966 ret = {}
967 ret = {}
967 visit = [node]
968 visit = [node]
968 while visit:
969 while visit:
969 x = visit.pop()
970 x = visit.pop()
970 if x in viscache:
971 if x in viscache:
971 ret.update(viscache[x])
972 ret.update(viscache[x])
972 elif x not in ret:
973 elif x not in ret:
973 ret[x] = 1
974 ret[x] = 1
974 if x in branches:
975 if x in branches:
975 visit[len(visit):] = branches[x].keys()
976 visit[len(visit):] = branches[x].keys()
976 viscache[node] = ret
977 viscache[node] = ret
977 return ret
978 return ret
978 if h not in branches:
979 if h not in branches:
979 continue
980 continue
980 # O(n^2), but somewhat limited. This only searches the
981 # O(n^2), but somewhat limited. This only searches the
981 # tags visible from a specific head, not all the tags in the
982 # tags visible from a specific head, not all the tags in the
982 # whole repo.
983 # whole repo.
983 for b in branches[h]:
984 for b in branches[h]:
984 vis = False
985 vis = False
985 for bb in branches[h].keys():
986 for bb in branches[h].keys():
986 if b != bb:
987 if b != bb:
987 if b in visible(bb):
988 if b in visible(bb):
988 vis = True
989 vis = True
989 break
990 break
990 if not vis:
991 if not vis:
991 l = out.setdefault(h, [])
992 l = out.setdefault(h, [])
992 l[len(l):] = self.nodetags(b)
993 l[len(l):] = self.nodetags(b)
993 return out
994 return out
994
995
995 def branches(self, nodes):
996 def branches(self, nodes):
996 if not nodes:
997 if not nodes:
997 nodes = [self.changelog.tip()]
998 nodes = [self.changelog.tip()]
998 b = []
999 b = []
999 for n in nodes:
1000 for n in nodes:
1000 t = n
1001 t = n
1001 while 1:
1002 while 1:
1002 p = self.changelog.parents(n)
1003 p = self.changelog.parents(n)
1003 if p[1] != nullid or p[0] == nullid:
1004 if p[1] != nullid or p[0] == nullid:
1004 b.append((t, n, p[0], p[1]))
1005 b.append((t, n, p[0], p[1]))
1005 break
1006 break
1006 n = p[0]
1007 n = p[0]
1007 return b
1008 return b
1008
1009
1009 def between(self, pairs):
1010 def between(self, pairs):
1010 r = []
1011 r = []
1011
1012
1012 for top, bottom in pairs:
1013 for top, bottom in pairs:
1013 n, l, i = top, [], 0
1014 n, l, i = top, [], 0
1014 f = 1
1015 f = 1
1015
1016
1016 while n != bottom:
1017 while n != bottom:
1017 p = self.changelog.parents(n)[0]
1018 p = self.changelog.parents(n)[0]
1018 if i == f:
1019 if i == f:
1019 l.append(n)
1020 l.append(n)
1020 f = f * 2
1021 f = f * 2
1021 n = p
1022 n = p
1022 i += 1
1023 i += 1
1023
1024
1024 r.append(l)
1025 r.append(l)
1025
1026
1026 return r
1027 return r
1027
1028
1028 def findincoming(self, remote, base=None, heads=None, force=False):
1029 def findincoming(self, remote, base=None, heads=None, force=False):
1029 """Return list of roots of the subsets of missing nodes from remote
1030 """Return list of roots of the subsets of missing nodes from remote
1030
1031
1031 If base dict is specified, assume that these nodes and their parents
1032 If base dict is specified, assume that these nodes and their parents
1032 exist on the remote side and that no child of a node of base exists
1033 exist on the remote side and that no child of a node of base exists
1033 in both remote and self.
1034 in both remote and self.
1034 Furthermore base will be updated to include the nodes that exists
1035 Furthermore base will be updated to include the nodes that exists
1035 in self and remote but no children exists in self and remote.
1036 in self and remote but no children exists in self and remote.
1036 If a list of heads is specified, return only nodes which are heads
1037 If a list of heads is specified, return only nodes which are heads
1037 or ancestors of these heads.
1038 or ancestors of these heads.
1038
1039
1039 All the ancestors of base are in self and in remote.
1040 All the ancestors of base are in self and in remote.
1040 All the descendants of the list returned are missing in self.
1041 All the descendants of the list returned are missing in self.
1041 (and so we know that the rest of the nodes are missing in remote, see
1042 (and so we know that the rest of the nodes are missing in remote, see
1042 outgoing)
1043 outgoing)
1043 """
1044 """
1044 m = self.changelog.nodemap
1045 m = self.changelog.nodemap
1045 search = []
1046 search = []
1046 fetch = {}
1047 fetch = {}
1047 seen = {}
1048 seen = {}
1048 seenbranch = {}
1049 seenbranch = {}
1049 if base == None:
1050 if base == None:
1050 base = {}
1051 base = {}
1051
1052
1052 if not heads:
1053 if not heads:
1053 heads = remote.heads()
1054 heads = remote.heads()
1054
1055
1055 if self.changelog.tip() == nullid:
1056 if self.changelog.tip() == nullid:
1056 base[nullid] = 1
1057 base[nullid] = 1
1057 if heads != [nullid]:
1058 if heads != [nullid]:
1058 return [nullid]
1059 return [nullid]
1059 return []
1060 return []
1060
1061
1061 # assume we're closer to the tip than the root
1062 # assume we're closer to the tip than the root
1062 # and start by examining the heads
1063 # and start by examining the heads
1063 self.ui.status(_("searching for changes\n"))
1064 self.ui.status(_("searching for changes\n"))
1064
1065
1065 unknown = []
1066 unknown = []
1066 for h in heads:
1067 for h in heads:
1067 if h not in m:
1068 if h not in m:
1068 unknown.append(h)
1069 unknown.append(h)
1069 else:
1070 else:
1070 base[h] = 1
1071 base[h] = 1
1071
1072
1072 if not unknown:
1073 if not unknown:
1073 return []
1074 return []
1074
1075
1075 req = dict.fromkeys(unknown)
1076 req = dict.fromkeys(unknown)
1076 reqcnt = 0
1077 reqcnt = 0
1077
1078
1078 # search through remote branches
1079 # search through remote branches
1079 # a 'branch' here is a linear segment of history, with four parts:
1080 # a 'branch' here is a linear segment of history, with four parts:
1080 # head, root, first parent, second parent
1081 # head, root, first parent, second parent
1081 # (a branch always has two parents (or none) by definition)
1082 # (a branch always has two parents (or none) by definition)
1082 unknown = remote.branches(unknown)
1083 unknown = remote.branches(unknown)
1083 while unknown:
1084 while unknown:
1084 r = []
1085 r = []
1085 while unknown:
1086 while unknown:
1086 n = unknown.pop(0)
1087 n = unknown.pop(0)
1087 if n[0] in seen:
1088 if n[0] in seen:
1088 continue
1089 continue
1089
1090
1090 self.ui.debug(_("examining %s:%s\n")
1091 self.ui.debug(_("examining %s:%s\n")
1091 % (short(n[0]), short(n[1])))
1092 % (short(n[0]), short(n[1])))
1092 if n[0] == nullid: # found the end of the branch
1093 if n[0] == nullid: # found the end of the branch
1093 pass
1094 pass
1094 elif n in seenbranch:
1095 elif n in seenbranch:
1095 self.ui.debug(_("branch already found\n"))
1096 self.ui.debug(_("branch already found\n"))
1096 continue
1097 continue
1097 elif n[1] and n[1] in m: # do we know the base?
1098 elif n[1] and n[1] in m: # do we know the base?
1098 self.ui.debug(_("found incomplete branch %s:%s\n")
1099 self.ui.debug(_("found incomplete branch %s:%s\n")
1099 % (short(n[0]), short(n[1])))
1100 % (short(n[0]), short(n[1])))
1100 search.append(n) # schedule branch range for scanning
1101 search.append(n) # schedule branch range for scanning
1101 seenbranch[n] = 1
1102 seenbranch[n] = 1
1102 else:
1103 else:
1103 if n[1] not in seen and n[1] not in fetch:
1104 if n[1] not in seen and n[1] not in fetch:
1104 if n[2] in m and n[3] in m:
1105 if n[2] in m and n[3] in m:
1105 self.ui.debug(_("found new changeset %s\n") %
1106 self.ui.debug(_("found new changeset %s\n") %
1106 short(n[1]))
1107 short(n[1]))
1107 fetch[n[1]] = 1 # earliest unknown
1108 fetch[n[1]] = 1 # earliest unknown
1108 for p in n[2:4]:
1109 for p in n[2:4]:
1109 if p in m:
1110 if p in m:
1110 base[p] = 1 # latest known
1111 base[p] = 1 # latest known
1111
1112
1112 for p in n[2:4]:
1113 for p in n[2:4]:
1113 if p not in req and p not in m:
1114 if p not in req and p not in m:
1114 r.append(p)
1115 r.append(p)
1115 req[p] = 1
1116 req[p] = 1
1116 seen[n[0]] = 1
1117 seen[n[0]] = 1
1117
1118
1118 if r:
1119 if r:
1119 reqcnt += 1
1120 reqcnt += 1
1120 self.ui.debug(_("request %d: %s\n") %
1121 self.ui.debug(_("request %d: %s\n") %
1121 (reqcnt, " ".join(map(short, r))))
1122 (reqcnt, " ".join(map(short, r))))
1122 for p in range(0, len(r), 10):
1123 for p in range(0, len(r), 10):
1123 for b in remote.branches(r[p:p+10]):
1124 for b in remote.branches(r[p:p+10]):
1124 self.ui.debug(_("received %s:%s\n") %
1125 self.ui.debug(_("received %s:%s\n") %
1125 (short(b[0]), short(b[1])))
1126 (short(b[0]), short(b[1])))
1126 unknown.append(b)
1127 unknown.append(b)
1127
1128
1128 # do binary search on the branches we found
1129 # do binary search on the branches we found
1129 while search:
1130 while search:
1130 n = search.pop(0)
1131 n = search.pop(0)
1131 reqcnt += 1
1132 reqcnt += 1
1132 l = remote.between([(n[0], n[1])])[0]
1133 l = remote.between([(n[0], n[1])])[0]
1133 l.append(n[1])
1134 l.append(n[1])
1134 p = n[0]
1135 p = n[0]
1135 f = 1
1136 f = 1
1136 for i in l:
1137 for i in l:
1137 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1138 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1138 if i in m:
1139 if i in m:
1139 if f <= 2:
1140 if f <= 2:
1140 self.ui.debug(_("found new branch changeset %s\n") %
1141 self.ui.debug(_("found new branch changeset %s\n") %
1141 short(p))
1142 short(p))
1142 fetch[p] = 1
1143 fetch[p] = 1
1143 base[i] = 1
1144 base[i] = 1
1144 else:
1145 else:
1145 self.ui.debug(_("narrowed branch search to %s:%s\n")
1146 self.ui.debug(_("narrowed branch search to %s:%s\n")
1146 % (short(p), short(i)))
1147 % (short(p), short(i)))
1147 search.append((p, i))
1148 search.append((p, i))
1148 break
1149 break
1149 p, f = i, f * 2
1150 p, f = i, f * 2
1150
1151
1151 # sanity check our fetch list
1152 # sanity check our fetch list
1152 for f in fetch.keys():
1153 for f in fetch.keys():
1153 if f in m:
1154 if f in m:
1154 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1155 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1155
1156
1156 if base.keys() == [nullid]:
1157 if base.keys() == [nullid]:
1157 if force:
1158 if force:
1158 self.ui.warn(_("warning: repository is unrelated\n"))
1159 self.ui.warn(_("warning: repository is unrelated\n"))
1159 else:
1160 else:
1160 raise util.Abort(_("repository is unrelated"))
1161 raise util.Abort(_("repository is unrelated"))
1161
1162
1162 self.ui.debug(_("found new changesets starting at ") +
1163 self.ui.debug(_("found new changesets starting at ") +
1163 " ".join([short(f) for f in fetch]) + "\n")
1164 " ".join([short(f) for f in fetch]) + "\n")
1164
1165
1165 self.ui.debug(_("%d total queries\n") % reqcnt)
1166 self.ui.debug(_("%d total queries\n") % reqcnt)
1166
1167
1167 return fetch.keys()
1168 return fetch.keys()
1168
1169
1169 def findoutgoing(self, remote, base=None, heads=None, force=False):
1170 def findoutgoing(self, remote, base=None, heads=None, force=False):
1170 """Return list of nodes that are roots of subsets not in remote
1171 """Return list of nodes that are roots of subsets not in remote
1171
1172
1172 If base dict is specified, assume that these nodes and their parents
1173 If base dict is specified, assume that these nodes and their parents
1173 exist on the remote side.
1174 exist on the remote side.
1174 If a list of heads is specified, return only nodes which are heads
1175 If a list of heads is specified, return only nodes which are heads
1175 or ancestors of these heads, and return a second element which
1176 or ancestors of these heads, and return a second element which
1176 contains all remote heads which get new children.
1177 contains all remote heads which get new children.
1177 """
1178 """
1178 if base == None:
1179 if base == None:
1179 base = {}
1180 base = {}
1180 self.findincoming(remote, base, heads, force=force)
1181 self.findincoming(remote, base, heads, force=force)
1181
1182
1182 self.ui.debug(_("common changesets up to ")
1183 self.ui.debug(_("common changesets up to ")
1183 + " ".join(map(short, base.keys())) + "\n")
1184 + " ".join(map(short, base.keys())) + "\n")
1184
1185
1185 remain = dict.fromkeys(self.changelog.nodemap)
1186 remain = dict.fromkeys(self.changelog.nodemap)
1186
1187
1187 # prune everything remote has from the tree
1188 # prune everything remote has from the tree
1188 del remain[nullid]
1189 del remain[nullid]
1189 remove = base.keys()
1190 remove = base.keys()
1190 while remove:
1191 while remove:
1191 n = remove.pop(0)
1192 n = remove.pop(0)
1192 if n in remain:
1193 if n in remain:
1193 del remain[n]
1194 del remain[n]
1194 for p in self.changelog.parents(n):
1195 for p in self.changelog.parents(n):
1195 remove.append(p)
1196 remove.append(p)
1196
1197
1197 # find every node whose parents have been pruned
1198 # find every node whose parents have been pruned
1198 subset = []
1199 subset = []
1199 # find every remote head that will get new children
1200 # find every remote head that will get new children
1200 updated_heads = {}
1201 updated_heads = {}
1201 for n in remain:
1202 for n in remain:
1202 p1, p2 = self.changelog.parents(n)
1203 p1, p2 = self.changelog.parents(n)
1203 if p1 not in remain and p2 not in remain:
1204 if p1 not in remain and p2 not in remain:
1204 subset.append(n)
1205 subset.append(n)
1205 if heads:
1206 if heads:
1206 if p1 in heads:
1207 if p1 in heads:
1207 updated_heads[p1] = True
1208 updated_heads[p1] = True
1208 if p2 in heads:
1209 if p2 in heads:
1209 updated_heads[p2] = True
1210 updated_heads[p2] = True
1210
1211
1211 # this is the set of all roots we have to push
1212 # this is the set of all roots we have to push
1212 if heads:
1213 if heads:
1213 return subset, updated_heads.keys()
1214 return subset, updated_heads.keys()
1214 else:
1215 else:
1215 return subset
1216 return subset
1216
1217
1217 def pull(self, remote, heads=None, force=False, lock=None):
1218 def pull(self, remote, heads=None, force=False, lock=None):
1218 mylock = False
1219 mylock = False
1219 if not lock:
1220 if not lock:
1220 lock = self.lock()
1221 lock = self.lock()
1221 mylock = True
1222 mylock = True
1222
1223
1223 try:
1224 try:
1224 fetch = self.findincoming(remote, force=force)
1225 fetch = self.findincoming(remote, force=force)
1225 if fetch == [nullid]:
1226 if fetch == [nullid]:
1226 self.ui.status(_("requesting all changes\n"))
1227 self.ui.status(_("requesting all changes\n"))
1227
1228
1228 if not fetch:
1229 if not fetch:
1229 self.ui.status(_("no changes found\n"))
1230 self.ui.status(_("no changes found\n"))
1230 return 0
1231 return 0
1231
1232
1232 if heads is None:
1233 if heads is None:
1233 cg = remote.changegroup(fetch, 'pull')
1234 cg = remote.changegroup(fetch, 'pull')
1234 else:
1235 else:
1235 cg = remote.changegroupsubset(fetch, heads, 'pull')
1236 cg = remote.changegroupsubset(fetch, heads, 'pull')
1236 return self.addchangegroup(cg, 'pull', remote.url())
1237 return self.addchangegroup(cg, 'pull', remote.url())
1237 finally:
1238 finally:
1238 if mylock:
1239 if mylock:
1239 lock.release()
1240 lock.release()
1240
1241
1241 def push(self, remote, force=False, revs=None):
1242 def push(self, remote, force=False, revs=None):
1242 # there are two ways to push to remote repo:
1243 # there are two ways to push to remote repo:
1243 #
1244 #
1244 # addchangegroup assumes local user can lock remote
1245 # addchangegroup assumes local user can lock remote
1245 # repo (local filesystem, old ssh servers).
1246 # repo (local filesystem, old ssh servers).
1246 #
1247 #
1247 # unbundle assumes local user cannot lock remote repo (new ssh
1248 # unbundle assumes local user cannot lock remote repo (new ssh
1248 # servers, http servers).
1249 # servers, http servers).
1249
1250
1250 if remote.capable('unbundle'):
1251 if remote.capable('unbundle'):
1251 return self.push_unbundle(remote, force, revs)
1252 return self.push_unbundle(remote, force, revs)
1252 return self.push_addchangegroup(remote, force, revs)
1253 return self.push_addchangegroup(remote, force, revs)
1253
1254
1254 def prepush(self, remote, force, revs):
1255 def prepush(self, remote, force, revs):
1255 base = {}
1256 base = {}
1256 remote_heads = remote.heads()
1257 remote_heads = remote.heads()
1257 inc = self.findincoming(remote, base, remote_heads, force=force)
1258 inc = self.findincoming(remote, base, remote_heads, force=force)
1258 if not force and inc:
1259 if not force and inc:
1259 self.ui.warn(_("abort: unsynced remote changes!\n"))
1260 self.ui.warn(_("abort: unsynced remote changes!\n"))
1260 self.ui.status(_("(did you forget to sync?"
1261 self.ui.status(_("(did you forget to sync?"
1261 " use push -f to force)\n"))
1262 " use push -f to force)\n"))
1262 return None, 1
1263 return None, 1
1263
1264
1264 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1265 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1265 if revs is not None:
1266 if revs is not None:
1266 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1267 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1267 else:
1268 else:
1268 bases, heads = update, self.changelog.heads()
1269 bases, heads = update, self.changelog.heads()
1269
1270
1270 if not bases:
1271 if not bases:
1271 self.ui.status(_("no changes found\n"))
1272 self.ui.status(_("no changes found\n"))
1272 return None, 1
1273 return None, 1
1273 elif not force:
1274 elif not force:
1274 # FIXME we don't properly detect creation of new heads
1275 # FIXME we don't properly detect creation of new heads
1275 # in the push -r case, assume the user knows what he's doing
1276 # in the push -r case, assume the user knows what he's doing
1276 if not revs and len(remote_heads) < len(heads) \
1277 if not revs and len(remote_heads) < len(heads) \
1277 and remote_heads != [nullid]:
1278 and remote_heads != [nullid]:
1278 self.ui.warn(_("abort: push creates new remote branches!\n"))
1279 self.ui.warn(_("abort: push creates new remote branches!\n"))
1279 self.ui.status(_("(did you forget to merge?"
1280 self.ui.status(_("(did you forget to merge?"
1280 " use push -f to force)\n"))
1281 " use push -f to force)\n"))
1281 return None, 1
1282 return None, 1
1282
1283
1283 if revs is None:
1284 if revs is None:
1284 cg = self.changegroup(update, 'push')
1285 cg = self.changegroup(update, 'push')
1285 else:
1286 else:
1286 cg = self.changegroupsubset(update, revs, 'push')
1287 cg = self.changegroupsubset(update, revs, 'push')
1287 return cg, remote_heads
1288 return cg, remote_heads
1288
1289
1289 def push_addchangegroup(self, remote, force, revs):
1290 def push_addchangegroup(self, remote, force, revs):
1290 lock = remote.lock()
1291 lock = remote.lock()
1291
1292
1292 ret = self.prepush(remote, force, revs)
1293 ret = self.prepush(remote, force, revs)
1293 if ret[0] is not None:
1294 if ret[0] is not None:
1294 cg, remote_heads = ret
1295 cg, remote_heads = ret
1295 return remote.addchangegroup(cg, 'push', self.url())
1296 return remote.addchangegroup(cg, 'push', self.url())
1296 return ret[1]
1297 return ret[1]
1297
1298
1298 def push_unbundle(self, remote, force, revs):
1299 def push_unbundle(self, remote, force, revs):
1299 # local repo finds heads on server, finds out what revs it
1300 # local repo finds heads on server, finds out what revs it
1300 # must push. once revs transferred, if server finds it has
1301 # must push. once revs transferred, if server finds it has
1301 # different heads (someone else won commit/push race), server
1302 # different heads (someone else won commit/push race), server
1302 # aborts.
1303 # aborts.
1303
1304
1304 ret = self.prepush(remote, force, revs)
1305 ret = self.prepush(remote, force, revs)
1305 if ret[0] is not None:
1306 if ret[0] is not None:
1306 cg, remote_heads = ret
1307 cg, remote_heads = ret
1307 if force: remote_heads = ['force']
1308 if force: remote_heads = ['force']
1308 return remote.unbundle(cg, remote_heads, 'push')
1309 return remote.unbundle(cg, remote_heads, 'push')
1309 return ret[1]
1310 return ret[1]
1310
1311
1311 def changegroupsubset(self, bases, heads, source):
1312 def changegroupsubset(self, bases, heads, source):
1312 """This function generates a changegroup consisting of all the nodes
1313 """This function generates a changegroup consisting of all the nodes
1313 that are descendents of any of the bases, and ancestors of any of
1314 that are descendents of any of the bases, and ancestors of any of
1314 the heads.
1315 the heads.
1315
1316
1316 It is fairly complex as determining which filenodes and which
1317 It is fairly complex as determining which filenodes and which
1317 manifest nodes need to be included for the changeset to be complete
1318 manifest nodes need to be included for the changeset to be complete
1318 is non-trivial.
1319 is non-trivial.
1319
1320
1320 Another wrinkle is doing the reverse, figuring out which changeset in
1321 Another wrinkle is doing the reverse, figuring out which changeset in
1321 the changegroup a particular filenode or manifestnode belongs to."""
1322 the changegroup a particular filenode or manifestnode belongs to."""
1322
1323
1323 self.hook('preoutgoing', throw=True, source=source)
1324 self.hook('preoutgoing', throw=True, source=source)
1324
1325
1325 # Set up some initial variables
1326 # Set up some initial variables
1326 # Make it easy to refer to self.changelog
1327 # Make it easy to refer to self.changelog
1327 cl = self.changelog
1328 cl = self.changelog
1328 # msng is short for missing - compute the list of changesets in this
1329 # msng is short for missing - compute the list of changesets in this
1329 # changegroup.
1330 # changegroup.
1330 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1331 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1331 # Some bases may turn out to be superfluous, and some heads may be
1332 # Some bases may turn out to be superfluous, and some heads may be
1332 # too. nodesbetween will return the minimal set of bases and heads
1333 # too. nodesbetween will return the minimal set of bases and heads
1333 # necessary to re-create the changegroup.
1334 # necessary to re-create the changegroup.
1334
1335
1335 # Known heads are the list of heads that it is assumed the recipient
1336 # Known heads are the list of heads that it is assumed the recipient
1336 # of this changegroup will know about.
1337 # of this changegroup will know about.
1337 knownheads = {}
1338 knownheads = {}
1338 # We assume that all parents of bases are known heads.
1339 # We assume that all parents of bases are known heads.
1339 for n in bases:
1340 for n in bases:
1340 for p in cl.parents(n):
1341 for p in cl.parents(n):
1341 if p != nullid:
1342 if p != nullid:
1342 knownheads[p] = 1
1343 knownheads[p] = 1
1343 knownheads = knownheads.keys()
1344 knownheads = knownheads.keys()
1344 if knownheads:
1345 if knownheads:
1345 # Now that we know what heads are known, we can compute which
1346 # Now that we know what heads are known, we can compute which
1346 # changesets are known. The recipient must know about all
1347 # changesets are known. The recipient must know about all
1347 # changesets required to reach the known heads from the null
1348 # changesets required to reach the known heads from the null
1348 # changeset.
1349 # changeset.
1349 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1350 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1350 junk = None
1351 junk = None
1351 # Transform the list into an ersatz set.
1352 # Transform the list into an ersatz set.
1352 has_cl_set = dict.fromkeys(has_cl_set)
1353 has_cl_set = dict.fromkeys(has_cl_set)
1353 else:
1354 else:
1354 # If there were no known heads, the recipient cannot be assumed to
1355 # If there were no known heads, the recipient cannot be assumed to
1355 # know about any changesets.
1356 # know about any changesets.
1356 has_cl_set = {}
1357 has_cl_set = {}
1357
1358
1358 # Make it easy to refer to self.manifest
1359 # Make it easy to refer to self.manifest
1359 mnfst = self.manifest
1360 mnfst = self.manifest
1360 # We don't know which manifests are missing yet
1361 # We don't know which manifests are missing yet
1361 msng_mnfst_set = {}
1362 msng_mnfst_set = {}
1362 # Nor do we know which filenodes are missing.
1363 # Nor do we know which filenodes are missing.
1363 msng_filenode_set = {}
1364 msng_filenode_set = {}
1364
1365
1365 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1366 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1366 junk = None
1367 junk = None
1367
1368
1368 # A changeset always belongs to itself, so the changenode lookup
1369 # A changeset always belongs to itself, so the changenode lookup
1369 # function for a changenode is identity.
1370 # function for a changenode is identity.
1370 def identity(x):
1371 def identity(x):
1371 return x
1372 return x
1372
1373
1373 # A function generating function. Sets up an environment for the
1374 # A function generating function. Sets up an environment for the
1374 # inner function.
1375 # inner function.
1375 def cmp_by_rev_func(revlog):
1376 def cmp_by_rev_func(revlog):
1376 # Compare two nodes by their revision number in the environment's
1377 # Compare two nodes by their revision number in the environment's
1377 # revision history. Since the revision number both represents the
1378 # revision history. Since the revision number both represents the
1378 # most efficient order to read the nodes in, and represents a
1379 # most efficient order to read the nodes in, and represents a
1379 # topological sorting of the nodes, this function is often useful.
1380 # topological sorting of the nodes, this function is often useful.
1380 def cmp_by_rev(a, b):
1381 def cmp_by_rev(a, b):
1381 return cmp(revlog.rev(a), revlog.rev(b))
1382 return cmp(revlog.rev(a), revlog.rev(b))
1382 return cmp_by_rev
1383 return cmp_by_rev
1383
1384
1384 # If we determine that a particular file or manifest node must be a
1385 # If we determine that a particular file or manifest node must be a
1385 # node that the recipient of the changegroup will already have, we can
1386 # node that the recipient of the changegroup will already have, we can
1386 # also assume the recipient will have all the parents. This function
1387 # also assume the recipient will have all the parents. This function
1387 # prunes them from the set of missing nodes.
1388 # prunes them from the set of missing nodes.
1388 def prune_parents(revlog, hasset, msngset):
1389 def prune_parents(revlog, hasset, msngset):
1389 haslst = hasset.keys()
1390 haslst = hasset.keys()
1390 haslst.sort(cmp_by_rev_func(revlog))
1391 haslst.sort(cmp_by_rev_func(revlog))
1391 for node in haslst:
1392 for node in haslst:
1392 parentlst = [p for p in revlog.parents(node) if p != nullid]
1393 parentlst = [p for p in revlog.parents(node) if p != nullid]
1393 while parentlst:
1394 while parentlst:
1394 n = parentlst.pop()
1395 n = parentlst.pop()
1395 if n not in hasset:
1396 if n not in hasset:
1396 hasset[n] = 1
1397 hasset[n] = 1
1397 p = [p for p in revlog.parents(n) if p != nullid]
1398 p = [p for p in revlog.parents(n) if p != nullid]
1398 parentlst.extend(p)
1399 parentlst.extend(p)
1399 for n in hasset:
1400 for n in hasset:
1400 msngset.pop(n, None)
1401 msngset.pop(n, None)
1401
1402
1402 # This is a function generating function used to set up an environment
1403 # This is a function generating function used to set up an environment
1403 # for the inner function to execute in.
1404 # for the inner function to execute in.
1404 def manifest_and_file_collector(changedfileset):
1405 def manifest_and_file_collector(changedfileset):
1405 # This is an information gathering function that gathers
1406 # This is an information gathering function that gathers
1406 # information from each changeset node that goes out as part of
1407 # information from each changeset node that goes out as part of
1407 # the changegroup. The information gathered is a list of which
1408 # the changegroup. The information gathered is a list of which
1408 # manifest nodes are potentially required (the recipient may
1409 # manifest nodes are potentially required (the recipient may
1409 # already have them) and total list of all files which were
1410 # already have them) and total list of all files which were
1410 # changed in any changeset in the changegroup.
1411 # changed in any changeset in the changegroup.
1411 #
1412 #
1412 # We also remember the first changenode we saw any manifest
1413 # We also remember the first changenode we saw any manifest
1413 # referenced by so we can later determine which changenode 'owns'
1414 # referenced by so we can later determine which changenode 'owns'
1414 # the manifest.
1415 # the manifest.
1415 def collect_manifests_and_files(clnode):
1416 def collect_manifests_and_files(clnode):
1416 c = cl.read(clnode)
1417 c = cl.read(clnode)
1417 for f in c[3]:
1418 for f in c[3]:
1418 # This is to make sure we only have one instance of each
1419 # This is to make sure we only have one instance of each
1419 # filename string for each filename.
1420 # filename string for each filename.
1420 changedfileset.setdefault(f, f)
1421 changedfileset.setdefault(f, f)
1421 msng_mnfst_set.setdefault(c[0], clnode)
1422 msng_mnfst_set.setdefault(c[0], clnode)
1422 return collect_manifests_and_files
1423 return collect_manifests_and_files
1423
1424
1424 # Figure out which manifest nodes (of the ones we think might be part
1425 # Figure out which manifest nodes (of the ones we think might be part
1425 # of the changegroup) the recipient must know about and remove them
1426 # of the changegroup) the recipient must know about and remove them
1426 # from the changegroup.
1427 # from the changegroup.
1427 def prune_manifests():
1428 def prune_manifests():
1428 has_mnfst_set = {}
1429 has_mnfst_set = {}
1429 for n in msng_mnfst_set:
1430 for n in msng_mnfst_set:
1430 # If a 'missing' manifest thinks it belongs to a changenode
1431 # If a 'missing' manifest thinks it belongs to a changenode
1431 # the recipient is assumed to have, obviously the recipient
1432 # the recipient is assumed to have, obviously the recipient
1432 # must have that manifest.
1433 # must have that manifest.
1433 linknode = cl.node(mnfst.linkrev(n))
1434 linknode = cl.node(mnfst.linkrev(n))
1434 if linknode in has_cl_set:
1435 if linknode in has_cl_set:
1435 has_mnfst_set[n] = 1
1436 has_mnfst_set[n] = 1
1436 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1437 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1437
1438
1438 # Use the information collected in collect_manifests_and_files to say
1439 # Use the information collected in collect_manifests_and_files to say
1439 # which changenode any manifestnode belongs to.
1440 # which changenode any manifestnode belongs to.
1440 def lookup_manifest_link(mnfstnode):
1441 def lookup_manifest_link(mnfstnode):
1441 return msng_mnfst_set[mnfstnode]
1442 return msng_mnfst_set[mnfstnode]
1442
1443
1443 # A function generating function that sets up the initial environment
1444 # A function generating function that sets up the initial environment
1444 # the inner function.
1445 # the inner function.
1445 def filenode_collector(changedfiles):
1446 def filenode_collector(changedfiles):
1446 next_rev = [0]
1447 next_rev = [0]
1447 # This gathers information from each manifestnode included in the
1448 # This gathers information from each manifestnode included in the
1448 # changegroup about which filenodes the manifest node references
1449 # changegroup about which filenodes the manifest node references
1449 # so we can include those in the changegroup too.
1450 # so we can include those in the changegroup too.
1450 #
1451 #
1451 # It also remembers which changenode each filenode belongs to. It
1452 # It also remembers which changenode each filenode belongs to. It
1452 # does this by assuming the a filenode belongs to the changenode
1453 # does this by assuming the a filenode belongs to the changenode
1453 # the first manifest that references it belongs to.
1454 # the first manifest that references it belongs to.
1454 def collect_msng_filenodes(mnfstnode):
1455 def collect_msng_filenodes(mnfstnode):
1455 r = mnfst.rev(mnfstnode)
1456 r = mnfst.rev(mnfstnode)
1456 if r == next_rev[0]:
1457 if r == next_rev[0]:
1457 # If the last rev we looked at was the one just previous,
1458 # If the last rev we looked at was the one just previous,
1458 # we only need to see a diff.
1459 # we only need to see a diff.
1459 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1460 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1460 # For each line in the delta
1461 # For each line in the delta
1461 for dline in delta.splitlines():
1462 for dline in delta.splitlines():
1462 # get the filename and filenode for that line
1463 # get the filename and filenode for that line
1463 f, fnode = dline.split('\0')
1464 f, fnode = dline.split('\0')
1464 fnode = bin(fnode[:40])
1465 fnode = bin(fnode[:40])
1465 f = changedfiles.get(f, None)
1466 f = changedfiles.get(f, None)
1466 # And if the file is in the list of files we care
1467 # And if the file is in the list of files we care
1467 # about.
1468 # about.
1468 if f is not None:
1469 if f is not None:
1469 # Get the changenode this manifest belongs to
1470 # Get the changenode this manifest belongs to
1470 clnode = msng_mnfst_set[mnfstnode]
1471 clnode = msng_mnfst_set[mnfstnode]
1471 # Create the set of filenodes for the file if
1472 # Create the set of filenodes for the file if
1472 # there isn't one already.
1473 # there isn't one already.
1473 ndset = msng_filenode_set.setdefault(f, {})
1474 ndset = msng_filenode_set.setdefault(f, {})
1474 # And set the filenode's changelog node to the
1475 # And set the filenode's changelog node to the
1475 # manifest's if it hasn't been set already.
1476 # manifest's if it hasn't been set already.
1476 ndset.setdefault(fnode, clnode)
1477 ndset.setdefault(fnode, clnode)
1477 else:
1478 else:
1478 # Otherwise we need a full manifest.
1479 # Otherwise we need a full manifest.
1479 m = mnfst.read(mnfstnode)
1480 m = mnfst.read(mnfstnode)
1480 # For every file in we care about.
1481 # For every file in we care about.
1481 for f in changedfiles:
1482 for f in changedfiles:
1482 fnode = m.get(f, None)
1483 fnode = m.get(f, None)
1483 # If it's in the manifest
1484 # If it's in the manifest
1484 if fnode is not None:
1485 if fnode is not None:
1485 # See comments above.
1486 # See comments above.
1486 clnode = msng_mnfst_set[mnfstnode]
1487 clnode = msng_mnfst_set[mnfstnode]
1487 ndset = msng_filenode_set.setdefault(f, {})
1488 ndset = msng_filenode_set.setdefault(f, {})
1488 ndset.setdefault(fnode, clnode)
1489 ndset.setdefault(fnode, clnode)
1489 # Remember the revision we hope to see next.
1490 # Remember the revision we hope to see next.
1490 next_rev[0] = r + 1
1491 next_rev[0] = r + 1
1491 return collect_msng_filenodes
1492 return collect_msng_filenodes
1492
1493
1493 # We have a list of filenodes we think we need for a file, lets remove
1494 # We have a list of filenodes we think we need for a file, lets remove
1494 # all those we now the recipient must have.
1495 # all those we now the recipient must have.
1495 def prune_filenodes(f, filerevlog):
1496 def prune_filenodes(f, filerevlog):
1496 msngset = msng_filenode_set[f]
1497 msngset = msng_filenode_set[f]
1497 hasset = {}
1498 hasset = {}
1498 # If a 'missing' filenode thinks it belongs to a changenode we
1499 # If a 'missing' filenode thinks it belongs to a changenode we
1499 # assume the recipient must have, then the recipient must have
1500 # assume the recipient must have, then the recipient must have
1500 # that filenode.
1501 # that filenode.
1501 for n in msngset:
1502 for n in msngset:
1502 clnode = cl.node(filerevlog.linkrev(n))
1503 clnode = cl.node(filerevlog.linkrev(n))
1503 if clnode in has_cl_set:
1504 if clnode in has_cl_set:
1504 hasset[n] = 1
1505 hasset[n] = 1
1505 prune_parents(filerevlog, hasset, msngset)
1506 prune_parents(filerevlog, hasset, msngset)
1506
1507
1507 # A function generator function that sets up the a context for the
1508 # A function generator function that sets up the a context for the
1508 # inner function.
1509 # inner function.
1509 def lookup_filenode_link_func(fname):
1510 def lookup_filenode_link_func(fname):
1510 msngset = msng_filenode_set[fname]
1511 msngset = msng_filenode_set[fname]
1511 # Lookup the changenode the filenode belongs to.
1512 # Lookup the changenode the filenode belongs to.
1512 def lookup_filenode_link(fnode):
1513 def lookup_filenode_link(fnode):
1513 return msngset[fnode]
1514 return msngset[fnode]
1514 return lookup_filenode_link
1515 return lookup_filenode_link
1515
1516
1516 # Now that we have all theses utility functions to help out and
1517 # Now that we have all theses utility functions to help out and
1517 # logically divide up the task, generate the group.
1518 # logically divide up the task, generate the group.
1518 def gengroup():
1519 def gengroup():
1519 # The set of changed files starts empty.
1520 # The set of changed files starts empty.
1520 changedfiles = {}
1521 changedfiles = {}
1521 # Create a changenode group generator that will call our functions
1522 # Create a changenode group generator that will call our functions
1522 # back to lookup the owning changenode and collect information.
1523 # back to lookup the owning changenode and collect information.
1523 group = cl.group(msng_cl_lst, identity,
1524 group = cl.group(msng_cl_lst, identity,
1524 manifest_and_file_collector(changedfiles))
1525 manifest_and_file_collector(changedfiles))
1525 for chnk in group:
1526 for chnk in group:
1526 yield chnk
1527 yield chnk
1527
1528
1528 # The list of manifests has been collected by the generator
1529 # The list of manifests has been collected by the generator
1529 # calling our functions back.
1530 # calling our functions back.
1530 prune_manifests()
1531 prune_manifests()
1531 msng_mnfst_lst = msng_mnfst_set.keys()
1532 msng_mnfst_lst = msng_mnfst_set.keys()
1532 # Sort the manifestnodes by revision number.
1533 # Sort the manifestnodes by revision number.
1533 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1534 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1534 # Create a generator for the manifestnodes that calls our lookup
1535 # Create a generator for the manifestnodes that calls our lookup
1535 # and data collection functions back.
1536 # and data collection functions back.
1536 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1537 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1537 filenode_collector(changedfiles))
1538 filenode_collector(changedfiles))
1538 for chnk in group:
1539 for chnk in group:
1539 yield chnk
1540 yield chnk
1540
1541
1541 # These are no longer needed, dereference and toss the memory for
1542 # These are no longer needed, dereference and toss the memory for
1542 # them.
1543 # them.
1543 msng_mnfst_lst = None
1544 msng_mnfst_lst = None
1544 msng_mnfst_set.clear()
1545 msng_mnfst_set.clear()
1545
1546
1546 changedfiles = changedfiles.keys()
1547 changedfiles = changedfiles.keys()
1547 changedfiles.sort()
1548 changedfiles.sort()
1548 # Go through all our files in order sorted by name.
1549 # Go through all our files in order sorted by name.
1549 for fname in changedfiles:
1550 for fname in changedfiles:
1550 filerevlog = self.file(fname)
1551 filerevlog = self.file(fname)
1551 # Toss out the filenodes that the recipient isn't really
1552 # Toss out the filenodes that the recipient isn't really
1552 # missing.
1553 # missing.
1553 if msng_filenode_set.has_key(fname):
1554 if msng_filenode_set.has_key(fname):
1554 prune_filenodes(fname, filerevlog)
1555 prune_filenodes(fname, filerevlog)
1555 msng_filenode_lst = msng_filenode_set[fname].keys()
1556 msng_filenode_lst = msng_filenode_set[fname].keys()
1556 else:
1557 else:
1557 msng_filenode_lst = []
1558 msng_filenode_lst = []
1558 # If any filenodes are left, generate the group for them,
1559 # If any filenodes are left, generate the group for them,
1559 # otherwise don't bother.
1560 # otherwise don't bother.
1560 if len(msng_filenode_lst) > 0:
1561 if len(msng_filenode_lst) > 0:
1561 yield changegroup.genchunk(fname)
1562 yield changegroup.genchunk(fname)
1562 # Sort the filenodes by their revision #
1563 # Sort the filenodes by their revision #
1563 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1564 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1564 # Create a group generator and only pass in a changenode
1565 # Create a group generator and only pass in a changenode
1565 # lookup function as we need to collect no information
1566 # lookup function as we need to collect no information
1566 # from filenodes.
1567 # from filenodes.
1567 group = filerevlog.group(msng_filenode_lst,
1568 group = filerevlog.group(msng_filenode_lst,
1568 lookup_filenode_link_func(fname))
1569 lookup_filenode_link_func(fname))
1569 for chnk in group:
1570 for chnk in group:
1570 yield chnk
1571 yield chnk
1571 if msng_filenode_set.has_key(fname):
1572 if msng_filenode_set.has_key(fname):
1572 # Don't need this anymore, toss it to free memory.
1573 # Don't need this anymore, toss it to free memory.
1573 del msng_filenode_set[fname]
1574 del msng_filenode_set[fname]
1574 # Signal that no more groups are left.
1575 # Signal that no more groups are left.
1575 yield changegroup.closechunk()
1576 yield changegroup.closechunk()
1576
1577
1577 if msng_cl_lst:
1578 if msng_cl_lst:
1578 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1579 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1579
1580
1580 return util.chunkbuffer(gengroup())
1581 return util.chunkbuffer(gengroup())
1581
1582
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: changelog nodes the recipient is assumed to already
        have; everything topologically after them is bundled.
        source: opaque tag passed through to the preoutgoing/outgoing hooks.
        Returns a util.chunkbuffer wrapping a lazy chunk generator.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # outgoing changelog nodes, plus their revision numbers as a dict
        # for O(1) membership tests in gennodelst below
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            # changelog entries are their own link nodes
            return x

        def gennodelst(revlog):
            # yield revlog's nodes (in revision order) whose linked
            # changeset is part of the outgoing set
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # side-effect callback for cl.group(): record every file name
            # touched by an outgoing changeset (c[3] is the files list of
            # a changelog entry)
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a node of revlog back to the changelog node it belongs to
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # 1) changeset chunks; collecting changed files is a side
            # effect of streaming the group
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # 2) manifest chunks
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # 3) one group per changed file, each preceded by a chunk
            # carrying the file name; files with no outgoing revisions are
            # skipped entirely.  NOTE: 'changegroup' below is the imported
            # module, not this method.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # closing chunk signals the end of the stream
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1647
1648
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source: chunk stream as produced by changegroup()/changegroupsubset()
        srctype: origin tag ('push', 'pull', ...) passed through to hooks
        url: location the group came from, also passed through to hooks
        Raises util.Abort when the changelog or a file revlog group is empty.
        """

        def csmap(x):
            # link callback while adding changesets: each new entry links
            # to the revision index it is about to occupy
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # link callback for manifests/files: changelog node -> revision
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1    # last revision before the group lands
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1    # last revision after
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # stream layout: a chunk with the file name, then that
                # file's revision group; an empty chunk ends the list
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # flush the appended changelog data to the real file
            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # runs before the transaction commits; a hook failure aborts
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # one 'incoming' hook invocation per added changeset
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
1743
1744
1744
1745
1745 def stream_in(self, remote):
1746 def stream_in(self, remote):
1746 fp = remote.stream_out()
1747 fp = remote.stream_out()
1747 resp = int(fp.readline())
1748 resp = int(fp.readline())
1748 if resp != 0:
1749 if resp != 0:
1749 raise util.Abort(_('operation forbidden by server'))
1750 raise util.Abort(_('operation forbidden by server'))
1750 self.ui.status(_('streaming all changes\n'))
1751 self.ui.status(_('streaming all changes\n'))
1751 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1752 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1752 self.ui.status(_('%d files to transfer, %s of data\n') %
1753 self.ui.status(_('%d files to transfer, %s of data\n') %
1753 (total_files, util.bytecount(total_bytes)))
1754 (total_files, util.bytecount(total_bytes)))
1754 start = time.time()
1755 start = time.time()
1755 for i in xrange(total_files):
1756 for i in xrange(total_files):
1756 name, size = fp.readline().split('\0', 1)
1757 name, size = fp.readline().split('\0', 1)
1757 size = int(size)
1758 size = int(size)
1758 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1759 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1759 ofp = self.opener(name, 'w')
1760 ofp = self.opener(name, 'w')
1760 for chunk in util.filechunkiter(fp, limit=size):
1761 for chunk in util.filechunkiter(fp, limit=size):
1761 ofp.write(chunk)
1762 ofp.write(chunk)
1762 ofp.close()
1763 ofp.close()
1763 elapsed = time.time() - start
1764 elapsed = time.time() - start
1764 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1765 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1765 (util.bytecount(total_bytes), elapsed,
1766 (util.bytecount(total_bytes), elapsed,
1766 util.bytecount(total_bytes / elapsed)))
1767 util.bytecount(total_bytes / elapsed)))
1767 self.reload()
1768 self.reload()
1768 return len(self.heads()) + 1
1769 return len(self.heads()) + 1
1769
1770
1770 def clone(self, remote, heads=[], stream=False):
1771 def clone(self, remote, heads=[], stream=False):
1771 '''clone remote repository.
1772 '''clone remote repository.
1772
1773
1773 keyword arguments:
1774 keyword arguments:
1774 heads: list of revs to clone (forces use of pull)
1775 heads: list of revs to clone (forces use of pull)
1775 stream: use streaming clone if possible'''
1776 stream: use streaming clone if possible'''
1776
1777
1777 # now, all clients that can request uncompressed clones can
1778 # now, all clients that can request uncompressed clones can
1778 # read repo formats supported by all servers that can serve
1779 # read repo formats supported by all servers that can serve
1779 # them.
1780 # them.
1780
1781
1781 # if revlog format changes, client will have to check version
1782 # if revlog format changes, client will have to check version
1782 # and format flags on "stream" capability, and use
1783 # and format flags on "stream" capability, and use
1783 # uncompressed only if compatible.
1784 # uncompressed only if compatible.
1784
1785
1785 if stream and not heads and remote.capable('stream'):
1786 if stream and not heads and remote.capable('stream'):
1786 return self.stream_in(remote)
1787 return self.stream_in(remote)
1787 return self.pull(remote, heads)
1788 return self.pull(remote, heads)
1788
1789
1789 # used to avoid circular references so destructors work
1790 # used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that renames the transaction journal files under
    *base* to their 'undo' names.

    A plain closure (rather than a bound method) is returned so that no
    reference cycle keeps the repository object alive and destructors run.
    """
    def rename_journal():
        journal = os.path.join(base, "journal")
        undo = os.path.join(base, "undo")
        util.rename(journal, undo)
        util.rename(journal + ".dirstate", undo + ".dirstate")
    return rename_journal
1797
1798
def instance(ui, path, create):
    """repo-type entry point: open (or create) a localrepository at *path*,
    stripping an optional 'file:' scheme prefix first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1800
1801
def islocal(path):
    """Any path handled by this module is local by definition."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now