Introduce HG_PENDING to solve pretxn races...
Matt Mackall
r7787:b8d750da default
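The race this change addresses: pretxnchangegroup and pretxncommit hooks must be able to inspect incoming changesets before the transaction commits, while those changesets stay invisible to every other reader. The new writepending() below dumps the delayed index updates to 00changelog.i.a, and HG_PENDING (set to the repository root) tells hg processes spawned by the hooks to read that pending file instead of the final index. A minimal sketch of an external hook that relies on this; the hook name, script path, and "must mention an issue" policy are illustrative, not part of this changeset:

# pretxn-check.py - sketch of an *external* pretxnchangegroup hook.
# Because the server exports HG_PENDING, the child "hg log" below can
# already see the incoming changeset even though it is not committed yet.
#
# [hooks]
# pretxnchangegroup.check = python /path/to/pretxn-check.py

import os, subprocess, sys

node = os.environ['HG_NODE']  # first changeset added by this changegroup
# HG_PENDING carries the root of the repo whose 00changelog.i.a is pending
print 'pending repo root:', os.environ.get('HG_PENDING', '(not set)')

# without HG_PENDING, this lookup would fail with "unknown revision"
p = subprocess.Popen(['hg', 'log', '--template', '{desc|firstline}\n',
                      '-r', node], stdout=subprocess.PIPE)
desc = p.communicate()[0].strip()

if 'issue' not in desc:
    sys.stderr.write('rejected %s: no issue number in %r\n' % (node[:12], desc))
    sys.exit(1)  # non-zero exit fails the pretxn hook and rolls back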
--- a/mercurial/changelog.py
+++ b/mercurial/changelog.py
@@ -1,196 +1,221 @@
 # changelog.py - changelog class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.

 from node import bin, hex, nullid
 from i18n import _
 import util, error, revlog

 def _string_escape(text):
     """
     >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
     >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
     >>> s
     'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
     >>> res = _string_escape(s)
     >>> s == res.decode('string_escape')
     True
     """
     # subset of the string_escape codec
     text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
     return text.replace('\0', '\\0')

 class appender:
     '''the changelog index must be update last on disk, so we use this class
     to delay writes to it'''
     def __init__(self, fp, buf):
         self.data = buf
         self.fp = fp
         self.offset = fp.tell()
         self.size = util.fstat(fp).st_size

     def end(self):
         return self.size + len("".join(self.data))
     def tell(self):
         return self.offset
     def flush(self):
         pass
     def close(self):
         self.fp.close()

     def seek(self, offset, whence=0):
         '''virtual file offset spans real file and data'''
         if whence == 0:
             self.offset = offset
         elif whence == 1:
             self.offset += offset
         elif whence == 2:
             self.offset = self.end() + offset
         if self.offset < self.size:
             self.fp.seek(self.offset)

     def read(self, count=-1):
         '''only trick here is reads that span real file and data'''
         ret = ""
         if self.offset < self.size:
             s = self.fp.read(count)
             ret = s
             self.offset += len(s)
             if count > 0:
                 count -= len(s)
         if count != 0:
             doff = self.offset - self.size
             self.data.insert(0, "".join(self.data))
             del self.data[1:]
             s = self.data[0][doff:doff+count]
             self.offset += len(s)
             ret += s
         return ret

     def write(self, s):
         self.data.append(str(s))
         self.offset += len(s)

 class changelog(revlog.revlog):
     def __init__(self, opener):
         revlog.revlog.__init__(self, opener, "00changelog.i")

     def delayupdate(self):
         "delay visibility of index updates to other readers"
         self._realopener = self.opener
         self.opener = self._delayopener
         self._delaycount = len(self)
         self._delaybuf = []
         self._delayname = None

     def finalize(self, tr):
         "finalize index updates"
         self.opener = self._realopener
         # move redirected index data back into place
         if self._delayname:
             util.rename(self._delayname + ".a", self._delayname)
         elif self._delaybuf:
             fp = self.opener(self.indexfile, 'a')
             fp.write("".join(self._delaybuf))
             fp.close()
-            del self._delaybuf
+            self._delaybuf = []
         # split when we're done
         self.checkinlinesize(tr)

     def _delayopener(self, name, mode='r'):
         fp = self._realopener(name, mode)
         # only divert the index
         if not name == self.indexfile:
             return fp
         # if we're doing an initial clone, divert to another file
         if self._delaycount == 0:
             self._delayname = fp.name
             if not len(self):
                 # make sure to truncate the file
                 mode = mode.replace('a', 'w')
             return self._realopener(name + ".a", mode)
         # otherwise, divert to memory
         return appender(fp, self._delaybuf)

+    def readpending(self, file):
+        r = revlog.revlog(self.opener, file)
+        self.index = r.index
+        self.nodemap = r.nodemap
+        self._chunkcache = r._chunkcache
+
+    def writepending(self):
+        "create a file containing the unfinalized state for pretxnchangegroup"
+        if self._delaybuf:
+            # make a temporary copy of the index
+            fp1 = self._realopener(self.indexfile)
+            fp2 = self._realopener(self.indexfile + ".a", "w")
+            fp2.write(fp1.read())
+            # add pending data
+            fp2.write("".join(self._delaybuf))
+            fp2.close()
+            # switch modes so finalize can simply rename
+            self._delaybuf = []
+            self._delayname = fp1.name
+
+        if self._delayname:
+            return True
+
+        return False
+
     def checkinlinesize(self, tr, fp=None):
         if self.opener == self._delayopener:
             return
         return revlog.revlog.checkinlinesize(self, tr, fp)

     def decode_extra(self, text):
         extra = {}
         for l in text.split('\0'):
             if l:
                 k, v = l.decode('string_escape').split(':', 1)
                 extra[k] = v
         return extra

     def encode_extra(self, d):
         # keys must be sorted to produce a deterministic changelog entry
         items = [_string_escape('%s:%s' % (k, d[k])) for k in util.sort(d)]
         return "\0".join(items)

     def read(self, node):
         """
         format used:
         nodeid\n        : manifest node in ascii
         user\n          : user, no \n or \r allowed
         time tz extra\n : date (time is int or float, timezone is int)
                         : extra is metadatas, encoded and separated by '\0'
                         : older versions ignore it
         files\n\n       : files modified by the cset, no \n or \r allowed
         (.*)            : comment (free text, ideally utf-8)

         changelog v0 doesn't use extra
         """
         text = self.revision(node)
         if not text:
             return (nullid, "", (0, 0), [], "", {'branch': 'default'})
         last = text.index("\n\n")
         desc = util.tolocal(text[last + 2:])
         l = text[:last].split('\n')
         manifest = bin(l[0])
         user = util.tolocal(l[1])

         extra_data = l[2].split(' ', 2)
         if len(extra_data) != 3:
             time = float(extra_data.pop(0))
             try:
                 # various tools did silly things with the time zone field.
                 timezone = int(extra_data[0])
             except:
                 timezone = 0
             extra = {}
         else:
             time, timezone, extra = extra_data
             time, timezone = float(time), int(timezone)
             extra = self.decode_extra(extra)
         if not extra.get('branch'):
             extra['branch'] = 'default'
         files = l[3:]
         return (manifest, user, (time, timezone), files, desc, extra)

     def add(self, manifest, files, desc, transaction, p1=None, p2=None,
                   user=None, date=None, extra={}):

         user = user.strip()
         if "\n" in user:
             raise error.RevlogError(_("username %s contains a newline")
                                     % repr(user))
         user, desc = util.fromlocal(user), util.fromlocal(desc)

         if date:
             parseddate = "%d %d" % util.parsedate(date)
         else:
             parseddate = "%d %d" % util.makedate()
         if extra and extra.get("branch") in ("default", ""):
             del extra["branch"]
         if extra:
             extra = self.encode_extra(extra)
             parseddate = "%s %s" % (parseddate, extra)
         l = [hex(manifest), user, parseddate] + util.sort(files) + ["", desc]
         text = "\n".join(l)
         return self.addrevision(text, transaction, len(self), p1, p2)
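Taken together: delayupdate() diverts changelog index writes into memory (or, on an initial clone, straight into 00changelog.i.a), writepending() materializes the buffered data as 00changelog.i.a for the pretxn hooks, and finalize() either renames that file into place or appends the buffer to the real index. The appender class is the in-memory leg of this. A runnable sketch of its contract, assuming a contemporary (Python 2) Mercurial is importable; the temp file stands in for the revlog index:

# Writes stay in an in-memory buffer; reads transparently span the
# on-disk prefix and the buffered tail.
import tempfile
from mercurial.changelog import appender

fp = tempfile.TemporaryFile()
fp.write("on-disk")
fp.flush()
fp.seek(0)

buf = []
a = appender(fp, buf)
a.seek(0, 2)           # seek to virtual end of file (disk + buffer)
a.write("buffered")    # goes into buf; the real file is never touched
a.seek(0)
print a.read(15)       # "on-diskbuffered" - spans file and buffer
print buf              # ['buffered'] - the still-pending data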
--- a/mercurial/hook.py
+++ b/mercurial/hook.py
@@ -1,115 +1,121 @@
 # hook.py - hook support for mercurial
 #
 # Copyright 2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.

 from i18n import _
 import util, os, sys

 def _pythonhook(ui, repo, name, hname, funcname, args, throw):
     '''call python hook. hook is callable object, looked up as
     name in python module. if callable returns "true", hook
     fails, else passes. if hook raises exception, treated as
     hook failure. exception propagates if throw is "true".

     reason for "true" meaning "hook failed" is so that
     unmodified commands (e.g. mercurial.commands.update) can
     be run as hooks without wrappers to convert return values.'''

     ui.note(_("calling hook %s: %s\n") % (hname, funcname))
     obj = funcname
     if not callable(obj):
         d = funcname.rfind('.')
         if d == -1:
             raise util.Abort(_('%s hook is invalid ("%s" not in '
                                'a module)') % (hname, funcname))
         modname = funcname[:d]
         try:
             obj = __import__(modname)
         except ImportError:
             try:
                 # extensions are loaded with hgext_ prefix
                 obj = __import__("hgext_%s" % modname)
             except ImportError:
                 raise util.Abort(_('%s hook is invalid '
                                    '(import of "%s" failed)') %
                                  (hname, modname))
         try:
             for p in funcname.split('.')[1:]:
                 obj = getattr(obj, p)
         except AttributeError:
             raise util.Abort(_('%s hook is invalid '
                                '("%s" is not defined)') %
                              (hname, funcname))
         if not callable(obj):
             raise util.Abort(_('%s hook is invalid '
                                '("%s" is not callable)') %
                              (hname, funcname))
     try:
         r = obj(ui=ui, repo=repo, hooktype=name, **args)
     except KeyboardInterrupt:
         raise
     except Exception, exc:
         if isinstance(exc, util.Abort):
             ui.warn(_('error: %s hook failed: %s\n') %
                     (hname, exc.args[0]))
         else:
             ui.warn(_('error: %s hook raised an exception: '
                       '%s\n') % (hname, exc))
         if throw:
             raise
         ui.print_exc()
         return True
     if r:
         if throw:
             raise util.Abort(_('%s hook failed') % hname)
         ui.warn(_('warning: %s hook failed\n') % hname)
     return r

 def _exthook(ui, repo, name, cmd, args, throw):
     ui.note(_("running hook %s: %s\n") % (name, cmd))
-    env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
+
+    env = {}
+    for k, v in args.iteritems():
+        if callable(v):
+            v = v()
+        env['HG_' + k.upper()] = v
+
     if repo:
         cwd = repo.root
     else:
         cwd = os.getcwd()
     r = util.system(cmd, environ=env, cwd=cwd)
     if r:
         desc, r = util.explain_exit(r)
         if throw:
             raise util.Abort(_('%s hook %s') % (name, desc))
         ui.warn(_('warning: %s hook %s\n') % (name, desc))
     return r

 _redirect = False
 def redirect(state):
     global _redirect
     _redirect = state

 def hook(ui, repo, name, throw=False, **args):
     r = False

     if _redirect:
         # temporarily redirect stdout to stderr
         oldstdout = os.dup(sys.__stdout__.fileno())
         os.dup2(sys.__stderr__.fileno(), sys.__stdout__.fileno())

     try:
         for hname, cmd in util.sort(ui.configitems('hooks')):
             if hname.split('.')[0] != name or not cmd:
                 continue
             if callable(cmd):
                 r = _pythonhook(ui, repo, name, hname, cmd, args, throw) or r
             elif cmd.startswith('python:'):
                 r = _pythonhook(ui, repo, name, hname, cmd[7:].strip(),
                                 args, throw) or r
             else:
                 r = _exthook(ui, repo, hname, cmd, args, throw) or r
     finally:
         if _redirect:
             os.dup2(oldstdout, sys.__stdout__.fileno())
             os.close(oldstdout)

     return r
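The _exthook() change is the other half of the mechanism: environment values may now be callables, evaluated only when a matching external hook actually runs. In this changeset's pretxn callers (beyond this excerpt), pending is passed as a lazy value that invokes changelog.writepending() and yields the repository root, which is how HG_PENDING gets exported only when some hook will see it. A self-contained sketch of the same lazy pattern; makeenv and expensive are illustrative stand-ins, not Mercurial APIs:

# Only pay for the expensive value when an env dict is actually built.
def makeenv(args):
    env = {}
    for k, v in args.iteritems():
        if callable(v):
            v = v()  # evaluate lazily, exactly as _exthook now does
        env['HG_' + k.upper()] = v
    return env

def expensive():
    print "writing pending index..."   # stands in for writepending()
    return "/path/to/repo"

# The callable is untouched until an external hook needs its value:
env = makeenv({'node': 'abc123', 'pending': expensive})
print env['HG_PENDING']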
@@ -1,2154 +1,2162
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui, store
12 import lock, transaction, stat, errno, ui, store
13 import os, time, util, extensions, hook, inspect, error
13 import os, time, util, extensions, hook, inspect, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = util.set(('lookup', 'changegroupsubset'))
18 capabilities = util.set(('lookup', 'changegroupsubset'))
19 supported = ('revlogv1', 'store', 'fncache')
19 supported = ('revlogv1', 'store', 'fncache')
20
20
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 self.root = os.path.realpath(path)
23 self.root = os.path.realpath(path)
24 self.path = os.path.join(self.root, ".hg")
24 self.path = os.path.join(self.root, ".hg")
25 self.origroot = path
25 self.origroot = path
26 self.opener = util.opener(self.path)
26 self.opener = util.opener(self.path)
27 self.wopener = util.opener(self.root)
27 self.wopener = util.opener(self.root)
28
28
29 if not os.path.isdir(self.path):
29 if not os.path.isdir(self.path):
30 if create:
30 if create:
31 if not os.path.exists(path):
31 if not os.path.exists(path):
32 os.mkdir(path)
32 os.mkdir(path)
33 os.mkdir(self.path)
33 os.mkdir(self.path)
34 requirements = ["revlogv1"]
34 requirements = ["revlogv1"]
35 if parentui.configbool('format', 'usestore', True):
35 if parentui.configbool('format', 'usestore', True):
36 os.mkdir(os.path.join(self.path, "store"))
36 os.mkdir(os.path.join(self.path, "store"))
37 requirements.append("store")
37 requirements.append("store")
38 if parentui.configbool('format', 'usefncache', True):
38 if parentui.configbool('format', 'usefncache', True):
39 requirements.append("fncache")
39 requirements.append("fncache")
40 # create an invalid changelog
40 # create an invalid changelog
41 self.opener("00changelog.i", "a").write(
41 self.opener("00changelog.i", "a").write(
42 '\0\0\0\2' # represents revlogv2
42 '\0\0\0\2' # represents revlogv2
43 ' dummy changelog to prevent using the old repo layout'
43 ' dummy changelog to prevent using the old repo layout'
44 )
44 )
45 reqfile = self.opener("requires", "w")
45 reqfile = self.opener("requires", "w")
46 for r in requirements:
46 for r in requirements:
47 reqfile.write("%s\n" % r)
47 reqfile.write("%s\n" % r)
48 reqfile.close()
48 reqfile.close()
49 else:
49 else:
50 raise error.RepoError(_("repository %s not found") % path)
50 raise error.RepoError(_("repository %s not found") % path)
51 elif create:
51 elif create:
52 raise error.RepoError(_("repository %s already exists") % path)
52 raise error.RepoError(_("repository %s already exists") % path)
53 else:
53 else:
54 # find requirements
54 # find requirements
55 requirements = []
55 requirements = []
56 try:
56 try:
57 requirements = self.opener("requires").read().splitlines()
57 requirements = self.opener("requires").read().splitlines()
58 for r in requirements:
58 for r in requirements:
59 if r not in self.supported:
59 if r not in self.supported:
60 raise error.RepoError(_("requirement '%s' not supported") % r)
60 raise error.RepoError(_("requirement '%s' not supported") % r)
61 except IOError, inst:
61 except IOError, inst:
62 if inst.errno != errno.ENOENT:
62 if inst.errno != errno.ENOENT:
63 raise
63 raise
64
64
65 self.store = store.store(requirements, self.path, util.opener)
65 self.store = store.store(requirements, self.path, util.opener)
66 self.spath = self.store.path
66 self.spath = self.store.path
67 self.sopener = self.store.opener
67 self.sopener = self.store.opener
68 self.sjoin = self.store.join
68 self.sjoin = self.store.join
69 self.opener.createmode = self.store.createmode
69 self.opener.createmode = self.store.createmode
70
70
71 self.ui = ui.ui(parentui=parentui)
71 self.ui = ui.ui(parentui=parentui)
72 try:
72 try:
73 self.ui.readconfig(self.join("hgrc"), self.root)
73 self.ui.readconfig(self.join("hgrc"), self.root)
74 extensions.loadall(self.ui)
74 extensions.loadall(self.ui)
75 except IOError:
75 except IOError:
76 pass
76 pass
77
77
78 self.tagscache = None
78 self.tagscache = None
79 self._tagstypecache = None
79 self._tagstypecache = None
80 self.branchcache = None
80 self.branchcache = None
81 self._ubranchcache = None # UTF-8 version of branchcache
81 self._ubranchcache = None # UTF-8 version of branchcache
82 self._branchcachetip = None
82 self._branchcachetip = None
83 self.nodetagscache = None
83 self.nodetagscache = None
84 self.filterpats = {}
84 self.filterpats = {}
85 self._datafilters = {}
85 self._datafilters = {}
86 self._transref = self._lockref = self._wlockref = None
86 self._transref = self._lockref = self._wlockref = None
87
87
88 def __getattr__(self, name):
88 def __getattr__(self, name):
89 if name == 'changelog':
89 if name == 'changelog':
90 self.changelog = changelog.changelog(self.sopener)
90 self.changelog = changelog.changelog(self.sopener)
91 if 'HG_PENDING' in os.environ:
92 p = os.environ['HG_PENDING']
93 if p.startswith(self.root):
94 self.changelog.readpending('00changelog.i.a')
91 self.sopener.defversion = self.changelog.version
95 self.sopener.defversion = self.changelog.version
92 return self.changelog
96 return self.changelog
93 if name == 'manifest':
97 if name == 'manifest':
94 self.changelog
98 self.changelog
95 self.manifest = manifest.manifest(self.sopener)
99 self.manifest = manifest.manifest(self.sopener)
96 return self.manifest
100 return self.manifest
97 if name == 'dirstate':
101 if name == 'dirstate':
98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
102 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
99 return self.dirstate
103 return self.dirstate
100 else:
104 else:
101 raise AttributeError(name)
105 raise AttributeError(name)
102
106
103 def __getitem__(self, changeid):
107 def __getitem__(self, changeid):
104 if changeid == None:
108 if changeid == None:
105 return context.workingctx(self)
109 return context.workingctx(self)
106 return context.changectx(self, changeid)
110 return context.changectx(self, changeid)
107
111
108 def __nonzero__(self):
112 def __nonzero__(self):
109 return True
113 return True
110
114
111 def __len__(self):
115 def __len__(self):
112 return len(self.changelog)
116 return len(self.changelog)
113
117
114 def __iter__(self):
118 def __iter__(self):
115 for i in xrange(len(self)):
119 for i in xrange(len(self)):
116 yield i
120 yield i
117
121
118 def url(self):
122 def url(self):
119 return 'file:' + self.root
123 return 'file:' + self.root
120
124
121 def hook(self, name, throw=False, **args):
125 def hook(self, name, throw=False, **args):
122 return hook.hook(self.ui, self, name, throw, **args)
126 return hook.hook(self.ui, self, name, throw, **args)
123
127
124 tag_disallowed = ':\r\n'
128 tag_disallowed = ':\r\n'
125
129
126 def _tag(self, names, node, message, local, user, date, parent=None,
130 def _tag(self, names, node, message, local, user, date, parent=None,
127 extra={}):
131 extra={}):
128 use_dirstate = parent is None
132 use_dirstate = parent is None
129
133
130 if isinstance(names, str):
134 if isinstance(names, str):
131 allchars = names
135 allchars = names
132 names = (names,)
136 names = (names,)
133 else:
137 else:
134 allchars = ''.join(names)
138 allchars = ''.join(names)
135 for c in self.tag_disallowed:
139 for c in self.tag_disallowed:
136 if c in allchars:
140 if c in allchars:
137 raise util.Abort(_('%r cannot be used in a tag name') % c)
141 raise util.Abort(_('%r cannot be used in a tag name') % c)
138
142
139 for name in names:
143 for name in names:
140 self.hook('pretag', throw=True, node=hex(node), tag=name,
144 self.hook('pretag', throw=True, node=hex(node), tag=name,
141 local=local)
145 local=local)
142
146
143 def writetags(fp, names, munge, prevtags):
147 def writetags(fp, names, munge, prevtags):
144 fp.seek(0, 2)
148 fp.seek(0, 2)
145 if prevtags and prevtags[-1] != '\n':
149 if prevtags and prevtags[-1] != '\n':
146 fp.write('\n')
150 fp.write('\n')
147 for name in names:
151 for name in names:
148 m = munge and munge(name) or name
152 m = munge and munge(name) or name
149 if self._tagstypecache and name in self._tagstypecache:
153 if self._tagstypecache and name in self._tagstypecache:
150 old = self.tagscache.get(name, nullid)
154 old = self.tagscache.get(name, nullid)
151 fp.write('%s %s\n' % (hex(old), m))
155 fp.write('%s %s\n' % (hex(old), m))
152 fp.write('%s %s\n' % (hex(node), m))
156 fp.write('%s %s\n' % (hex(node), m))
153 fp.close()
157 fp.close()
154
158
155 prevtags = ''
159 prevtags = ''
156 if local:
160 if local:
157 try:
161 try:
158 fp = self.opener('localtags', 'r+')
162 fp = self.opener('localtags', 'r+')
159 except IOError, err:
163 except IOError, err:
160 fp = self.opener('localtags', 'a')
164 fp = self.opener('localtags', 'a')
161 else:
165 else:
162 prevtags = fp.read()
166 prevtags = fp.read()
163
167
164 # local tags are stored in the current charset
168 # local tags are stored in the current charset
165 writetags(fp, names, None, prevtags)
169 writetags(fp, names, None, prevtags)
166 for name in names:
170 for name in names:
167 self.hook('tag', node=hex(node), tag=name, local=local)
171 self.hook('tag', node=hex(node), tag=name, local=local)
168 return
172 return
169
173
170 if use_dirstate:
174 if use_dirstate:
171 try:
175 try:
172 fp = self.wfile('.hgtags', 'rb+')
176 fp = self.wfile('.hgtags', 'rb+')
173 except IOError, err:
177 except IOError, err:
174 fp = self.wfile('.hgtags', 'ab')
178 fp = self.wfile('.hgtags', 'ab')
175 else:
179 else:
176 prevtags = fp.read()
180 prevtags = fp.read()
177 else:
181 else:
178 try:
182 try:
179 prevtags = self.filectx('.hgtags', parent).data()
183 prevtags = self.filectx('.hgtags', parent).data()
180 except error.LookupError:
184 except error.LookupError:
181 pass
185 pass
182 fp = self.wfile('.hgtags', 'wb')
186 fp = self.wfile('.hgtags', 'wb')
183 if prevtags:
187 if prevtags:
184 fp.write(prevtags)
188 fp.write(prevtags)
185
189
186 # committed tags are stored in UTF-8
190 # committed tags are stored in UTF-8
187 writetags(fp, names, util.fromlocal, prevtags)
191 writetags(fp, names, util.fromlocal, prevtags)
188
192
189 if use_dirstate and '.hgtags' not in self.dirstate:
193 if use_dirstate and '.hgtags' not in self.dirstate:
190 self.add(['.hgtags'])
194 self.add(['.hgtags'])
191
195
192 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
196 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
193 extra=extra)
197 extra=extra)
194
198
195 for name in names:
199 for name in names:
196 self.hook('tag', node=hex(node), tag=name, local=local)
200 self.hook('tag', node=hex(node), tag=name, local=local)
197
201
198 return tagnode
202 return tagnode
199
203
200 def tag(self, names, node, message, local, user, date):
204 def tag(self, names, node, message, local, user, date):
201 '''tag a revision with one or more symbolic names.
205 '''tag a revision with one or more symbolic names.
202
206
203 names is a list of strings or, when adding a single tag, names may be a
207 names is a list of strings or, when adding a single tag, names may be a
204 string.
208 string.
205
209
206 if local is True, the tags are stored in a per-repository file.
210 if local is True, the tags are stored in a per-repository file.
207 otherwise, they are stored in the .hgtags file, and a new
211 otherwise, they are stored in the .hgtags file, and a new
208 changeset is committed with the change.
212 changeset is committed with the change.
209
213
210 keyword arguments:
214 keyword arguments:
211
215
212 local: whether to store tags in non-version-controlled file
216 local: whether to store tags in non-version-controlled file
213 (default False)
217 (default False)
214
218
215 message: commit message to use if committing
219 message: commit message to use if committing
216
220
217 user: name of user to use if committing
221 user: name of user to use if committing
218
222
219 date: date tuple to use if committing'''
223 date: date tuple to use if committing'''
220
224
221 for x in self.status()[:5]:
225 for x in self.status()[:5]:
222 if '.hgtags' in x:
226 if '.hgtags' in x:
223 raise util.Abort(_('working copy of .hgtags is changed '
227 raise util.Abort(_('working copy of .hgtags is changed '
224 '(please commit .hgtags manually)'))
228 '(please commit .hgtags manually)'))
225
229
226 self._tag(names, node, message, local, user, date)
230 self._tag(names, node, message, local, user, date)
227
231
228 def tags(self):
232 def tags(self):
229 '''return a mapping of tag to node'''
233 '''return a mapping of tag to node'''
230 if self.tagscache:
234 if self.tagscache:
231 return self.tagscache
235 return self.tagscache
232
236
233 globaltags = {}
237 globaltags = {}
234 tagtypes = {}
238 tagtypes = {}
235
239
236 def readtags(lines, fn, tagtype):
240 def readtags(lines, fn, tagtype):
237 filetags = {}
241 filetags = {}
238 count = 0
242 count = 0
239
243
240 def warn(msg):
244 def warn(msg):
241 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
245 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
242
246
243 for l in lines:
247 for l in lines:
244 count += 1
248 count += 1
245 if not l:
249 if not l:
246 continue
250 continue
247 s = l.split(" ", 1)
251 s = l.split(" ", 1)
248 if len(s) != 2:
252 if len(s) != 2:
249 warn(_("cannot parse entry"))
253 warn(_("cannot parse entry"))
250 continue
254 continue
251 node, key = s
255 node, key = s
252 key = util.tolocal(key.strip()) # stored in UTF-8
256 key = util.tolocal(key.strip()) # stored in UTF-8
253 try:
257 try:
254 bin_n = bin(node)
258 bin_n = bin(node)
255 except TypeError:
259 except TypeError:
256 warn(_("node '%s' is not well formed") % node)
260 warn(_("node '%s' is not well formed") % node)
257 continue
261 continue
258 if bin_n not in self.changelog.nodemap:
262 if bin_n not in self.changelog.nodemap:
259 warn(_("tag '%s' refers to unknown node") % key)
263 warn(_("tag '%s' refers to unknown node") % key)
260 continue
264 continue
261
265
262 h = []
266 h = []
263 if key in filetags:
267 if key in filetags:
264 n, h = filetags[key]
268 n, h = filetags[key]
265 h.append(n)
269 h.append(n)
266 filetags[key] = (bin_n, h)
270 filetags[key] = (bin_n, h)
267
271
268 for k, nh in filetags.iteritems():
272 for k, nh in filetags.iteritems():
269 if k not in globaltags:
273 if k not in globaltags:
270 globaltags[k] = nh
274 globaltags[k] = nh
271 tagtypes[k] = tagtype
275 tagtypes[k] = tagtype
272 continue
276 continue
273
277
274 # we prefer the global tag if:
278 # we prefer the global tag if:
275 # it supercedes us OR
279 # it supercedes us OR
276 # mutual supercedes and it has a higher rank
280 # mutual supercedes and it has a higher rank
277 # otherwise we win because we're tip-most
281 # otherwise we win because we're tip-most
278 an, ah = nh
282 an, ah = nh
279 bn, bh = globaltags[k]
283 bn, bh = globaltags[k]
280 if (bn != an and an in bh and
284 if (bn != an and an in bh and
281 (bn not in ah or len(bh) > len(ah))):
285 (bn not in ah or len(bh) > len(ah))):
282 an = bn
286 an = bn
283 ah.extend([n for n in bh if n not in ah])
287 ah.extend([n for n in bh if n not in ah])
284 globaltags[k] = an, ah
288 globaltags[k] = an, ah
285 tagtypes[k] = tagtype
289 tagtypes[k] = tagtype
286
290
287 # read the tags file from each head, ending with the tip
291 # read the tags file from each head, ending with the tip
288 f = None
292 f = None
289 for rev, node, fnode in self._hgtagsnodes():
293 for rev, node, fnode in self._hgtagsnodes():
290 f = (f and f.filectx(fnode) or
294 f = (f and f.filectx(fnode) or
291 self.filectx('.hgtags', fileid=fnode))
295 self.filectx('.hgtags', fileid=fnode))
292 readtags(f.data().splitlines(), f, "global")
296 readtags(f.data().splitlines(), f, "global")
293
297
294 try:
298 try:
295 data = util.fromlocal(self.opener("localtags").read())
299 data = util.fromlocal(self.opener("localtags").read())
296 # localtags are stored in the local character set
300 # localtags are stored in the local character set
297 # while the internal tag table is stored in UTF-8
301 # while the internal tag table is stored in UTF-8
298 readtags(data.splitlines(), "localtags", "local")
302 readtags(data.splitlines(), "localtags", "local")
299 except IOError:
303 except IOError:
300 pass
304 pass
301
305
302 self.tagscache = {}
306 self.tagscache = {}
303 self._tagstypecache = {}
307 self._tagstypecache = {}
304 for k, nh in globaltags.iteritems():
308 for k, nh in globaltags.iteritems():
305 n = nh[0]
309 n = nh[0]
306 if n != nullid:
310 if n != nullid:
307 self.tagscache[k] = n
311 self.tagscache[k] = n
308 self._tagstypecache[k] = tagtypes[k]
312 self._tagstypecache[k] = tagtypes[k]
309 self.tagscache['tip'] = self.changelog.tip()
313 self.tagscache['tip'] = self.changelog.tip()
310 return self.tagscache
314 return self.tagscache
311
315
312 def tagtype(self, tagname):
316 def tagtype(self, tagname):
313 '''
317 '''
314 return the type of the given tag. result can be:
318 return the type of the given tag. result can be:
315
319
316 'local' : a local tag
320 'local' : a local tag
317 'global' : a global tag
321 'global' : a global tag
318 None : tag does not exist
322 None : tag does not exist
319 '''
323 '''
320
324
321 self.tags()
325 self.tags()
322
326
323 return self._tagstypecache.get(tagname)
327 return self._tagstypecache.get(tagname)
324
328
325 def _hgtagsnodes(self):
329 def _hgtagsnodes(self):
326 heads = self.heads()
330 heads = self.heads()
327 heads.reverse()
331 heads.reverse()
328 last = {}
332 last = {}
329 ret = []
333 ret = []
330 for node in heads:
334 for node in heads:
331 c = self[node]
335 c = self[node]
332 rev = c.rev()
336 rev = c.rev()
333 try:
337 try:
334 fnode = c.filenode('.hgtags')
338 fnode = c.filenode('.hgtags')
335 except error.LookupError:
339 except error.LookupError:
336 continue
340 continue
337 ret.append((rev, node, fnode))
341 ret.append((rev, node, fnode))
338 if fnode in last:
342 if fnode in last:
339 ret[last[fnode]] = None
343 ret[last[fnode]] = None
340 last[fnode] = len(ret) - 1
344 last[fnode] = len(ret) - 1
341 return [item for item in ret if item]
345 return [item for item in ret if item]
342
346
343 def tagslist(self):
347 def tagslist(self):
344 '''return a list of tags ordered by revision'''
348 '''return a list of tags ordered by revision'''
345 l = []
349 l = []
346 for t, n in self.tags().iteritems():
350 for t, n in self.tags().iteritems():
347 try:
351 try:
348 r = self.changelog.rev(n)
352 r = self.changelog.rev(n)
349 except:
353 except:
350 r = -2 # sort to the beginning of the list if unknown
354 r = -2 # sort to the beginning of the list if unknown
351 l.append((r, t, n))
355 l.append((r, t, n))
352 return [(t, n) for r, t, n in util.sort(l)]
356 return [(t, n) for r, t, n in util.sort(l)]
353
357
354 def nodetags(self, node):
358 def nodetags(self, node):
355 '''return the tags associated with a node'''
359 '''return the tags associated with a node'''
356 if not self.nodetagscache:
360 if not self.nodetagscache:
357 self.nodetagscache = {}
361 self.nodetagscache = {}
358 for t, n in self.tags().iteritems():
362 for t, n in self.tags().iteritems():
359 self.nodetagscache.setdefault(n, []).append(t)
363 self.nodetagscache.setdefault(n, []).append(t)
360 return self.nodetagscache.get(node, [])
364 return self.nodetagscache.get(node, [])
361
365
362 def _branchtags(self, partial, lrev):
366 def _branchtags(self, partial, lrev):
363 # TODO: rename this function?
367 # TODO: rename this function?
364 tiprev = len(self) - 1
368 tiprev = len(self) - 1
365 if lrev != tiprev:
369 if lrev != tiprev:
366 self._updatebranchcache(partial, lrev+1, tiprev+1)
370 self._updatebranchcache(partial, lrev+1, tiprev+1)
367 self._writebranchcache(partial, self.changelog.tip(), tiprev)
371 self._writebranchcache(partial, self.changelog.tip(), tiprev)
368
372
369 return partial
373 return partial
370
374
371 def _branchheads(self):
375 def _branchheads(self):
372 tip = self.changelog.tip()
376 tip = self.changelog.tip()
373 if self.branchcache is not None and self._branchcachetip == tip:
377 if self.branchcache is not None and self._branchcachetip == tip:
374 return self.branchcache
378 return self.branchcache
375
379
376 oldtip = self._branchcachetip
380 oldtip = self._branchcachetip
377 self._branchcachetip = tip
381 self._branchcachetip = tip
378 if self.branchcache is None:
382 if self.branchcache is None:
379 self.branchcache = {} # avoid recursion in changectx
383 self.branchcache = {} # avoid recursion in changectx
380 else:
384 else:
381 self.branchcache.clear() # keep using the same dict
385 self.branchcache.clear() # keep using the same dict
382 if oldtip is None or oldtip not in self.changelog.nodemap:
386 if oldtip is None or oldtip not in self.changelog.nodemap:
383 partial, last, lrev = self._readbranchcache()
387 partial, last, lrev = self._readbranchcache()
384 else:
388 else:
385 lrev = self.changelog.rev(oldtip)
389 lrev = self.changelog.rev(oldtip)
386 partial = self._ubranchcache
390 partial = self._ubranchcache
387
391
388 self._branchtags(partial, lrev)
392 self._branchtags(partial, lrev)
389 # this private cache holds all heads (not just tips)
393 # this private cache holds all heads (not just tips)
390 self._ubranchcache = partial
394 self._ubranchcache = partial
391
395
392 # the branch cache is stored on disk as UTF-8, but in the local
396 # the branch cache is stored on disk as UTF-8, but in the local
393 # charset internally
397 # charset internally
394 for k, v in partial.iteritems():
398 for k, v in partial.iteritems():
395 self.branchcache[util.tolocal(k)] = v
399 self.branchcache[util.tolocal(k)] = v
396 return self.branchcache
400 return self.branchcache
397
401
398
402
399 def branchtags(self):
403 def branchtags(self):
400 '''return a dict where branch names map to the tipmost head of
404 '''return a dict where branch names map to the tipmost head of
401 the branch, open heads come before closed'''
405 the branch, open heads come before closed'''
402 bt = {}
406 bt = {}
403 for bn, heads in self._branchheads().iteritems():
407 for bn, heads in self._branchheads().iteritems():
404 head = None
408 head = None
405 for i in range(len(heads)-1, -1, -1):
409 for i in range(len(heads)-1, -1, -1):
406 h = heads[i]
410 h = heads[i]
407 if 'close' not in self.changelog.read(h)[5]:
411 if 'close' not in self.changelog.read(h)[5]:
408 head = h
412 head = h
409 break
413 break
410 # no open heads were found
414 # no open heads were found
411 if head is None:
415 if head is None:
412 head = heads[-1]
416 head = heads[-1]
413 bt[bn] = head
417 bt[bn] = head
414 return bt
418 return bt
415
419
416
420
417 def _readbranchcache(self):
421 def _readbranchcache(self):
418 partial = {}
422 partial = {}
419 try:
423 try:
420 f = self.opener("branchheads.cache")
424 f = self.opener("branchheads.cache")
421 lines = f.read().split('\n')
425 lines = f.read().split('\n')
422 f.close()
426 f.close()
423 except (IOError, OSError):
427 except (IOError, OSError):
424 return {}, nullid, nullrev
428 return {}, nullid, nullrev
425
429
426 try:
430 try:
427 last, lrev = lines.pop(0).split(" ", 1)
431 last, lrev = lines.pop(0).split(" ", 1)
428 last, lrev = bin(last), int(lrev)
432 last, lrev = bin(last), int(lrev)
429 if lrev >= len(self) or self[lrev].node() != last:
433 if lrev >= len(self) or self[lrev].node() != last:
430 # invalidate the cache
434 # invalidate the cache
431 raise ValueError('invalidating branch cache (tip differs)')
435 raise ValueError('invalidating branch cache (tip differs)')
432 for l in lines:
436 for l in lines:
433 if not l: continue
437 if not l: continue
434 node, label = l.split(" ", 1)
438 node, label = l.split(" ", 1)
435 partial.setdefault(label.strip(), []).append(bin(node))
439 partial.setdefault(label.strip(), []).append(bin(node))
436 except KeyboardInterrupt:
440 except KeyboardInterrupt:
437 raise
441 raise
438 except Exception, inst:
442 except Exception, inst:
439 if self.ui.debugflag:
443 if self.ui.debugflag:
440 self.ui.warn(str(inst), '\n')
444 self.ui.warn(str(inst), '\n')
441 partial, last, lrev = {}, nullid, nullrev
445 partial, last, lrev = {}, nullid, nullrev
442 return partial, last, lrev
446 return partial, last, lrev
443
447
444 def _writebranchcache(self, branches, tip, tiprev):
448 def _writebranchcache(self, branches, tip, tiprev):
445 try:
449 try:
446 f = self.opener("branchheads.cache", "w", atomictemp=True)
450 f = self.opener("branchheads.cache", "w", atomictemp=True)
447 f.write("%s %s\n" % (hex(tip), tiprev))
451 f.write("%s %s\n" % (hex(tip), tiprev))
448 for label, nodes in branches.iteritems():
452 for label, nodes in branches.iteritems():
449 for node in nodes:
453 for node in nodes:
450 f.write("%s %s\n" % (hex(node), label))
454 f.write("%s %s\n" % (hex(node), label))
451 f.rename()
455 f.rename()
452 except (IOError, OSError):
456 except (IOError, OSError):
453 pass
457 pass
454
458
455 def _updatebranchcache(self, partial, start, end):
459 def _updatebranchcache(self, partial, start, end):
456 for r in xrange(start, end):
460 for r in xrange(start, end):
457 c = self[r]
461 c = self[r]
458 b = c.branch()
462 b = c.branch()
459 bheads = partial.setdefault(b, [])
463 bheads = partial.setdefault(b, [])
460 bheads.append(c.node())
464 bheads.append(c.node())
461 for p in c.parents():
465 for p in c.parents():
462 pn = p.node()
466 pn = p.node()
463 if pn in bheads:
467 if pn in bheads:
464 bheads.remove(pn)
468 bheads.remove(pn)
465
469
466 def lookup(self, key):
470 def lookup(self, key):
467 if isinstance(key, int):
471 if isinstance(key, int):
468 return self.changelog.node(key)
472 return self.changelog.node(key)
469 elif key == '.':
473 elif key == '.':
470 return self.dirstate.parents()[0]
474 return self.dirstate.parents()[0]
471 elif key == 'null':
475 elif key == 'null':
472 return nullid
476 return nullid
473 elif key == 'tip':
477 elif key == 'tip':
474 return self.changelog.tip()
478 return self.changelog.tip()
475 n = self.changelog._match(key)
479 n = self.changelog._match(key)
476 if n:
480 if n:
477 return n
481 return n
478 if key in self.tags():
482 if key in self.tags():
479 return self.tags()[key]
483 return self.tags()[key]
480 if key in self.branchtags():
484 if key in self.branchtags():
481 return self.branchtags()[key]
485 return self.branchtags()[key]
482 n = self.changelog._partialmatch(key)
486 n = self.changelog._partialmatch(key)
483 if n:
487 if n:
484 return n
488 return n
485 try:
489 try:
486 if len(key) == 20:
490 if len(key) == 20:
487 key = hex(key)
491 key = hex(key)
488 except:
492 except:
489 pass
493 pass
490 raise error.RepoError(_("unknown revision '%s'") % key)
494 raise error.RepoError(_("unknown revision '%s'") % key)
491
495
492 def local(self):
496 def local(self):
493 return True
497 return True
494
498
495 def join(self, f):
499 def join(self, f):
496 return os.path.join(self.path, f)
500 return os.path.join(self.path, f)
497
501
498 def wjoin(self, f):
502 def wjoin(self, f):
499 return os.path.join(self.root, f)
503 return os.path.join(self.root, f)
500
504
501 def rjoin(self, f):
505 def rjoin(self, f):
502 return os.path.join(self.root, util.pconvert(f))
506 return os.path.join(self.root, util.pconvert(f))
503
507
504 def file(self, f):
508 def file(self, f):
505 if f[0] == '/':
509 if f[0] == '/':
506 f = f[1:]
510 f = f[1:]
507 return filelog.filelog(self.sopener, f)
511 return filelog.filelog(self.sopener, f)
508
512
509 def changectx(self, changeid):
513 def changectx(self, changeid):
510 return self[changeid]
514 return self[changeid]
511
515
512 def parents(self, changeid=None):
516 def parents(self, changeid=None):
513 '''get list of changectxs for parents of changeid'''
517 '''get list of changectxs for parents of changeid'''
514 return self[changeid].parents()
518 return self[changeid].parents()
515
519
516 def filectx(self, path, changeid=None, fileid=None):
520 def filectx(self, path, changeid=None, fileid=None):
517 """changeid can be a changeset revision, node, or tag.
521 """changeid can be a changeset revision, node, or tag.
518 fileid can be a file revision or node."""
522 fileid can be a file revision or node."""
519 return context.filectx(self, path, changeid, fileid)
523 return context.filectx(self, path, changeid, fileid)
520
524
521 def getcwd(self):
525 def getcwd(self):
522 return self.dirstate.getcwd()
526 return self.dirstate.getcwd()
523
527
524 def pathto(self, f, cwd=None):
528 def pathto(self, f, cwd=None):
525 return self.dirstate.pathto(f, cwd)
529 return self.dirstate.pathto(f, cwd)
526
530
527 def wfile(self, f, mode='r'):
531 def wfile(self, f, mode='r'):
528 return self.wopener(f, mode)
532 return self.wopener(f, mode)
529
533
530 def _link(self, f):
534 def _link(self, f):
531 return os.path.islink(self.wjoin(f))
535 return os.path.islink(self.wjoin(f))
532
536
533 def _filter(self, filter, filename, data):
537 def _filter(self, filter, filename, data):
534 if filter not in self.filterpats:
538 if filter not in self.filterpats:
535 l = []
539 l = []
536 for pat, cmd in self.ui.configitems(filter):
540 for pat, cmd in self.ui.configitems(filter):
537 if cmd == '!':
541 if cmd == '!':
538 continue
542 continue
539 mf = util.matcher(self.root, "", [pat], [], [])[1]
543 mf = util.matcher(self.root, "", [pat], [], [])[1]
540 fn = None
544 fn = None
541 params = cmd
545 params = cmd
542 for name, filterfn in self._datafilters.iteritems():
546 for name, filterfn in self._datafilters.iteritems():
543 if cmd.startswith(name):
547 if cmd.startswith(name):
544 fn = filterfn
548 fn = filterfn
545 params = cmd[len(name):].lstrip()
549 params = cmd[len(name):].lstrip()
546 break
550 break
547 if not fn:
551 if not fn:
548 fn = lambda s, c, **kwargs: util.filter(s, c)
552 fn = lambda s, c, **kwargs: util.filter(s, c)
549 # Wrap old filters not supporting keyword arguments
553 # Wrap old filters not supporting keyword arguments
550 if not inspect.getargspec(fn)[2]:
554 if not inspect.getargspec(fn)[2]:
551 oldfn = fn
555 oldfn = fn
552 fn = lambda s, c, **kwargs: oldfn(s, c)
556 fn = lambda s, c, **kwargs: oldfn(s, c)
553 l.append((mf, fn, params))
557 l.append((mf, fn, params))
554 self.filterpats[filter] = l
558 self.filterpats[filter] = l
555
559
556 for mf, fn, cmd in self.filterpats[filter]:
560 for mf, fn, cmd in self.filterpats[filter]:
557 if mf(filename):
561 if mf(filename):
558 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
562 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
559 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
563 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
560 break
564 break
561
565
562 return data
566 return data
563
567
564 def adddatafilter(self, name, filter):
568 def adddatafilter(self, name, filter):
565 self._datafilters[name] = filter
569 self._datafilters[name] = filter
566
570
567 def wread(self, filename):
571 def wread(self, filename):
568 if self._link(filename):
572 if self._link(filename):
569 data = os.readlink(self.wjoin(filename))
573 data = os.readlink(self.wjoin(filename))
570 else:
574 else:
571 data = self.wopener(filename, 'r').read()
575 data = self.wopener(filename, 'r').read()
572 return self._filter("encode", filename, data)
576 return self._filter("encode", filename, data)
573
577
574 def wwrite(self, filename, data, flags):
578 def wwrite(self, filename, data, flags):
575 data = self._filter("decode", filename, data)
579 data = self._filter("decode", filename, data)
576 try:
580 try:
577 os.unlink(self.wjoin(filename))
581 os.unlink(self.wjoin(filename))
578 except OSError:
582 except OSError:
579 pass
583 pass
580 if 'l' in flags:
584 if 'l' in flags:
581 self.wopener.symlink(data, filename)
585 self.wopener.symlink(data, filename)
582 else:
586 else:
583 self.wopener(filename, 'w').write(data)
587 self.wopener(filename, 'w').write(data)
584 if 'x' in flags:
588 if 'x' in flags:
585 util.set_flags(self.wjoin(filename), False, True)
589 util.set_flags(self.wjoin(filename), False, True)
586
590
587 def wwritedata(self, filename, data):
591 def wwritedata(self, filename, data):
588 return self._filter("decode", filename, data)
592 return self._filter("decode", filename, data)
589
593
590 def transaction(self):
594 def transaction(self):
591 if self._transref and self._transref():
595 if self._transref and self._transref():
592 return self._transref().nest()
596 return self._transref().nest()
593
597
594 # abort here if the journal already exists
598 # abort here if the journal already exists
595 if os.path.exists(self.sjoin("journal")):
599 if os.path.exists(self.sjoin("journal")):
596 raise error.RepoError(_("journal already exists - run hg recover"))
600 raise error.RepoError(_("journal already exists - run hg recover"))
597
601
598 # save dirstate for rollback
602 # save dirstate for rollback
599 try:
603 try:
600 ds = self.opener("dirstate").read()
604 ds = self.opener("dirstate").read()
601 except IOError:
605 except IOError:
602 ds = ""
606 ds = ""
603 self.opener("journal.dirstate", "w").write(ds)
607 self.opener("journal.dirstate", "w").write(ds)
604 self.opener("journal.branch", "w").write(self.dirstate.branch())
608 self.opener("journal.branch", "w").write(self.dirstate.branch())
605
609
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
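        # drop the cached changelog and manifest so they are re-read from
        # disk on next access, and clear the derived tag and branch caches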
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
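        # try a non-blocking acquire first; if the lock is held and waiting
        # is allowed, warn and retry with the configured timeout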
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
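        # reuse the existing lock if the weak reference is still alive;
        # otherwise acquire a fresh one and cache it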
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fn = fctx.path()
        t = fctx.data()
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = fctx.renamed()
        if cp and cp[0] != fn:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4    as the merge base
            #

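            # cf is the path this file was copied from, cr the filenode of
            # the copy source, and nfp the parent that will survive as the
            # second parent of the new filelog revision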
            cf = cp[0]
            cr = manifest1.get(cf)
            nfp = fp2

            if manifest2: # branch merge
                if fp2 == nullid or cr is None: # copied on remote side
                    if cf in manifest2:
                        cr = manifest2[cf]
                        nfp = fp1

            # find source in nearest ancestor if we've lost track
            if not cr:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fn, cf))
                for a in self['.'].ancestors():
                    if cf in a:
                        cr = a[cf].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
            meta["copy"] = cf
            meta["copyrev"] = hex(cr)
            fp1, fp2 = nullid, nfp
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

    def commit(self, files=None, text="", user=None, date=None,
               match=None, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = None
        if extra.get("close"):
            force = True
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            use_dirstate = (p1 is None) # not rawcommit

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                if (not force and p2 != nullid and
                    (match and (match.files() or match.anypats()))):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))

                if files:
                    modified, removed = [], []
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            modified.append(f)
                        elif s == 'r':
                            removed.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                    changes = [modified, [], removed, [], []]
                else:
                    changes = self.status(match=match)
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)
                changes = [files, [], [], [], []]

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))
            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            return self._commitctx(wctx, force, force_editor, empty_ok,
                                   use_dirstate, update_dirstate)
        finally:
            del lock, wlock

    def commitctx(self, ctx):
        """Add a new revision to current repository.

        Revision information is passed in the context.memctx argument.
        commitctx() does not touch the working directory.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            return self._commitctx(ctx, force=True, force_editor=False,
                                   empty_ok=True, use_dirstate=False,
                                   update_dirstate=False)
        finally:
            del lock, wlock

    def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
                   use_dirstate=True, update_dirstate=True):
        tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = util.sort(wctx.modified() + wctx.added())
            remove = wctx.removed()
            extra = wctx.extra().copy()
            branchname = extra['branch']
            user = wctx.user()
            text = wctx.description()

            p1, p2 = [p.node() for p in wctx.parents()]
            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    fctx = wctx.filectx(f)
                    newflags = fctx.flags()
                    new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        if m1.flags(f) != newflags:
                            changed.append(f)
                    m1.set(f, newflags)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            updated, added = [], []
            for f in util.sort(changed):
                if f in m1 or f in m2:
                    updated.append(f)
                else:
                    added.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in util.sort(remove) if f in m1 or f in m2]
            removed1 = []

            for f in removed:
                if f in m1:
                    del m1[f]
                    removed1.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed1))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                    edittext.append("")
                edittext.append("") # Empty line between message and comments.
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: added %s" % f for f in added])
                edittext.extend(["HG: changed %s" % f for f in updated])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not added and not updated and not removed:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

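            # Delay changelog index writes so the new changeset stays
            # invisible to normal readers for now. writepending() flushes
            # the delayed entries to a separate pending file and reports
            # whether there was anything to write; passing that callback as
            # 'pending' lets the pretxncommit hook machinery expose a view
            # of the repository that already contains the new changeset.
            # finalize() then folds the pending data into the real changelog
            # before the transaction closes, removing the race where hooks
            # could observe a half-written changelog.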
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
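            # the first element of dirstate.status()'s result is the list
            # of files whose state can't be decided from stat data alone
            # and therefore needs a real content comparison (cmp)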
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except lock.LockError:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
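                    # a None filenode forces the content comparison below,
                    # since files in the working directory have not been
                    # committed and so have no node id yet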
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None, closed=True):
        heads = self.changelog.heads(start)
        def display(head):
            if closed:
                return True
            extras = self.changelog.read(head)[5]
            return ('close' not in extras)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
        return [n for (r, n) in util.sort(heads)]

    def branchheads(self, branch=None, start=None, closed=True):
        if branch is None:
            branch = self[None].branch()
        branches = self._branchheads()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

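            # walk first parents from top towards bottom, keeping nodes at
            # exponentially growing offsets (1, 2, 4, 8, ...) to give the
            # peer a log-sized sample of this branch segment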
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but whose children do not exist in both self
        and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote,
        see outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but whose children do not exist in both self
        and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
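        # remote.between() returns, for each (head, base) pair, the nodes
        # it has at exponentially spaced offsets; we scan for the first
        # node we know locally, then re-search the narrowed (p, i) range
        # until the earliest unknown changeset of each segment is found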
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch[p] = 1
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), fetch.keys(), heads

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

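            # if the remote can filter by heads, pin the pull to the heads
            # reported during discovery so changesets pushed to the remote
            # in the meantime don't slip into this changegroup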
            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
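                # simulate the remote's head list after the push: a known
                # remote head survives unless one of our outgoing heads
                # descends from it, so ending up with more heads than the
                # remote has now means the push would create new ones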
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """
1609
1616
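To make the extranodes shape concrete, here is a toy value matching the description above; the 20-byte identifiers are fabricated placeholders, not real nodes.

manifest_node = b'\x11' * 20
file_node = b'\x22' * 20
linknode = b'\x33' * 20   # changelog node to transmit as the linkrev

extranodes = {
    1: [(manifest_node, linknode)],           # key 1 stands for the manifest
    'foo/bar.txt': [(file_node, linknode)],   # other keys are filenames
}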
1610 if extranodes is None:
1617 if extranodes is None:
1611 # can we go through the fast path ?
1618 # can we go through the fast path ?
1612 heads.sort()
1619 heads.sort()
1613 allheads = self.heads()
1620 allheads = self.heads()
1614 allheads.sort()
1621 allheads.sort()
1615 if heads == allheads:
1622 if heads == allheads:
1616 common = []
1623 common = []
1617 # parents of bases are known from both sides
1624 # parents of bases are known from both sides
1618 for n in bases:
1625 for n in bases:
1619 for p in self.changelog.parents(n):
1626 for p in self.changelog.parents(n):
1620 if p != nullid:
1627 if p != nullid:
1621 common.append(p)
1628 common.append(p)
1622 return self._changegroup(common, source)
1629 return self._changegroup(common, source)
1623
1630
1624 self.hook('preoutgoing', throw=True, source=source)
1631 self.hook('preoutgoing', throw=True, source=source)
1625
1632
1626 # Set up some initial variables
1633 # Set up some initial variables
1627 # Make it easy to refer to self.changelog
1634 # Make it easy to refer to self.changelog
1628 cl = self.changelog
1635 cl = self.changelog
1629 # msng is short for missing - compute the list of changesets in this
1636 # msng is short for missing - compute the list of changesets in this
1630 # changegroup.
1637 # changegroup.
1631 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1638 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1632 self.changegroupinfo(msng_cl_lst, source)
1639 self.changegroupinfo(msng_cl_lst, source)
1633 # Some bases may turn out to be superfluous, and some heads may be
1640 # Some bases may turn out to be superfluous, and some heads may be
1634 # too. nodesbetween will return the minimal set of bases and heads
1641 # too. nodesbetween will return the minimal set of bases and heads
1635 # necessary to re-create the changegroup.
1642 # necessary to re-create the changegroup.
1636
1643
1637 # Known heads are the list of heads that it is assumed the recipient
1644 # Known heads are the list of heads that it is assumed the recipient
1638 # of this changegroup will know about.
1645 # of this changegroup will know about.
1639 knownheads = {}
1646 knownheads = {}
1640 # We assume that all parents of bases are known heads.
1647 # We assume that all parents of bases are known heads.
1641 for n in bases:
1648 for n in bases:
1642 for p in cl.parents(n):
1649 for p in cl.parents(n):
1643 if p != nullid:
1650 if p != nullid:
1644 knownheads[p] = 1
1651 knownheads[p] = 1
1645 knownheads = knownheads.keys()
1652 knownheads = knownheads.keys()
1646 if knownheads:
1653 if knownheads:
1647 # Now that we know what heads are known, we can compute which
1654 # Now that we know what heads are known, we can compute which
1648 # changesets are known. The recipient must know about all
1655 # changesets are known. The recipient must know about all
1649 # changesets required to reach the known heads from the null
1656 # changesets required to reach the known heads from the null
1650 # changeset.
1657 # changeset.
1651 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1658 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1652 junk = None
1659 junk = None
1653 # Transform the list into an ersatz set.
1660 # Transform the list into an ersatz set.
1654 has_cl_set = dict.fromkeys(has_cl_set)
1661 has_cl_set = dict.fromkeys(has_cl_set)
1655 else:
1662 else:
1656 # If there were no known heads, the recipient cannot be assumed to
1663 # If there were no known heads, the recipient cannot be assumed to
1657 # know about any changesets.
1664 # know about any changesets.
1658 has_cl_set = {}
1665 has_cl_set = {}
1659
1666
1660 # Make it easy to refer to self.manifest
1667 # Make it easy to refer to self.manifest
1661 mnfst = self.manifest
1668 mnfst = self.manifest
1662 # We don't know which manifests are missing yet
1669 # We don't know which manifests are missing yet
1663 msng_mnfst_set = {}
1670 msng_mnfst_set = {}
1664 # Nor do we know which filenodes are missing.
1671 # Nor do we know which filenodes are missing.
1665 msng_filenode_set = {}
1672 msng_filenode_set = {}
1666
1673
1667 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1674 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1668 junk = None
1675 junk = None
1669
1676
1670 # A changeset always belongs to itself, so the changenode lookup
1677 # A changeset always belongs to itself, so the changenode lookup
1671 # function for a changenode is identity.
1678 # function for a changenode is identity.
1672 def identity(x):
1679 def identity(x):
1673 return x
1680 return x
1674
1681
1675 # A function generating function. Sets up an environment for the
1682 # A function generating function. Sets up an environment for the
1676 # inner function.
1683 # inner function.
1677 def cmp_by_rev_func(revlog):
1684 def cmp_by_rev_func(revlog):
1678 # Compare two nodes by their revision number in the environment's
1685 # Compare two nodes by their revision number in the environment's
1679 # revision history. Since the revision number both represents the
1686 # revision history. Since the revision number both represents the
1680 # most efficient order to read the nodes in, and represents a
1687 # most efficient order to read the nodes in, and represents a
1681 # topological sorting of the nodes, this function is often useful.
1688 # topological sorting of the nodes, this function is often useful.
1682 def cmp_by_rev(a, b):
1689 def cmp_by_rev(a, b):
1683 return cmp(revlog.rev(a), revlog.rev(b))
1690 return cmp(revlog.rev(a), revlog.rev(b))
1684 return cmp_by_rev
1691 return cmp_by_rev
1685
1692
1686 # If we determine that a particular file or manifest node must be a
1693 # If we determine that a particular file or manifest node must be a
1687 # node that the recipient of the changegroup will already have, we can
1694 # node that the recipient of the changegroup will already have, we can
1688 # also assume the recipient will have all the parents. This function
1695 # also assume the recipient will have all the parents. This function
1689 # prunes them from the set of missing nodes.
1696 # prunes them from the set of missing nodes.
1690 def prune_parents(revlog, hasset, msngset):
1697 def prune_parents(revlog, hasset, msngset):
1691 haslst = hasset.keys()
1698 haslst = hasset.keys()
1692 haslst.sort(cmp_by_rev_func(revlog))
1699 haslst.sort(cmp_by_rev_func(revlog))
1693 for node in haslst:
1700 for node in haslst:
1694 parentlst = [p for p in revlog.parents(node) if p != nullid]
1701 parentlst = [p for p in revlog.parents(node) if p != nullid]
1695 while parentlst:
1702 while parentlst:
1696 n = parentlst.pop()
1703 n = parentlst.pop()
1697 if n not in hasset:
1704 if n not in hasset:
1698 hasset[n] = 1
1705 hasset[n] = 1
1699 p = [p for p in revlog.parents(n) if p != nullid]
1706 p = [p for p in revlog.parents(n) if p != nullid]
1700 parentlst.extend(p)
1707 parentlst.extend(p)
1701 for n in hasset:
1708 for n in hasset:
1702 msngset.pop(n, None)
1709 msngset.pop(n, None)
1703
1710
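The pruning logic above is worth seeing in isolation: once a node is known to the recipient, every ancestor of it is too, so the whole ancestry can be dropped from the missing set. A standalone sketch over a plain parent map (prune_known_ancestors and the parents dict are toy stand-ins, not the revlog API):

def prune_known_ancestors(parents, hasset, msngset):
    # parents: node -> list of parent nodes (nullid entries already removed)
    stack = list(hasset)
    while stack:
        n = stack.pop()
        for p in parents.get(n, []):
            if p not in hasset:
                hasset.add(p)
                stack.append(p)
    for n in hasset:
        msngset.pop(n, None)

# 'a' <- 'b' <- 'c': knowing 'c' implies the recipient also has 'a' and 'b'
parents = {'c': ['b'], 'b': ['a'], 'a': []}
missing = {'a': 1, 'b': 1, 'c': 1}
prune_known_ancestors(parents, set('c'), missing)
assert missing == {}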
1704 # This is a function generating function used to set up an environment
1711 # This is a function generating function used to set up an environment
1705 # for the inner function to execute in.
1712 # for the inner function to execute in.
1706 def manifest_and_file_collector(changedfileset):
1713 def manifest_and_file_collector(changedfileset):
1707 # This is an information gathering function that gathers
1714 # This is an information gathering function that gathers
1708 # information from each changeset node that goes out as part of
1715 # information from each changeset node that goes out as part of
1709 # the changegroup. The information gathered is a list of which
1716 # the changegroup. The information gathered is a list of which
1710 # manifest nodes are potentially required (the recipient may
1717 # manifest nodes are potentially required (the recipient may
1711 # already have them) and the total list of all files which were
1718 # already have them) and the total list of all files which were
1712 # changed in any changeset in the changegroup.
1719 # changed in any changeset in the changegroup.
1713 #
1720 #
1714 # We also remember the first changenode we saw any manifest
1721 # We also remember the first changenode we saw any manifest
1715 # referenced by so we can later determine which changenode 'owns'
1722 # referenced by so we can later determine which changenode 'owns'
1716 # the manifest.
1723 # the manifest.
1717 def collect_manifests_and_files(clnode):
1724 def collect_manifests_and_files(clnode):
1718 c = cl.read(clnode)
1725 c = cl.read(clnode)
1719 for f in c[3]:
1726 for f in c[3]:
1720 # This is to make sure we only have one instance of each
1727 # This is to make sure we only have one instance of each
1721 # filename string for each filename.
1728 # filename string for each filename.
1722 changedfileset.setdefault(f, f)
1729 changedfileset.setdefault(f, f)
1723 msng_mnfst_set.setdefault(c[0], clnode)
1730 msng_mnfst_set.setdefault(c[0], clnode)
1724 return collect_manifests_and_files
1731 return collect_manifests_and_files
1725
1732
1726 # Figure out which manifest nodes (of the ones we think might be part
1733 # Figure out which manifest nodes (of the ones we think might be part
1727 # of the changegroup) the recipient must know about and remove them
1734 # of the changegroup) the recipient must know about and remove them
1728 # from the changegroup.
1735 # from the changegroup.
1729 def prune_manifests():
1736 def prune_manifests():
1730 has_mnfst_set = {}
1737 has_mnfst_set = {}
1731 for n in msng_mnfst_set:
1738 for n in msng_mnfst_set:
1732 # If a 'missing' manifest thinks it belongs to a changenode
1739 # If a 'missing' manifest thinks it belongs to a changenode
1733 # the recipient is assumed to have, obviously the recipient
1740 # the recipient is assumed to have, obviously the recipient
1734 # must have that manifest.
1741 # must have that manifest.
1735 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1742 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1736 if linknode in has_cl_set:
1743 if linknode in has_cl_set:
1737 has_mnfst_set[n] = 1
1744 has_mnfst_set[n] = 1
1738 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1745 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1739
1746
1740 # Use the information collected in collect_manifests_and_files to say
1747 # Use the information collected in collect_manifests_and_files to say
1741 # which changenode any manifestnode belongs to.
1748 # which changenode any manifestnode belongs to.
1742 def lookup_manifest_link(mnfstnode):
1749 def lookup_manifest_link(mnfstnode):
1743 return msng_mnfst_set[mnfstnode]
1750 return msng_mnfst_set[mnfstnode]
1744
1751
1745 # A function generating function that sets up the initial environment
1752 # A function generating function that sets up the initial environment
1746 # for the inner function.
1753 # for the inner function.
1747 def filenode_collector(changedfiles):
1754 def filenode_collector(changedfiles):
1748 next_rev = [0]
1755 next_rev = [0]
1749 # This gathers information from each manifestnode included in the
1756 # This gathers information from each manifestnode included in the
1750 # changegroup about which filenodes the manifest node references
1757 # changegroup about which filenodes the manifest node references
1751 # so we can include those in the changegroup too.
1758 # so we can include those in the changegroup too.
1752 #
1759 #
1753 # It also remembers which changenode each filenode belongs to. It
1760 # It also remembers which changenode each filenode belongs to. It
1754 # does this by assuming that a filenode belongs to the same
1761 # does this by assuming that a filenode belongs to the same
1755 # changenode as the first manifest that references it.
1762 # changenode as the first manifest that references it.
1756 def collect_msng_filenodes(mnfstnode):
1763 def collect_msng_filenodes(mnfstnode):
1757 r = mnfst.rev(mnfstnode)
1764 r = mnfst.rev(mnfstnode)
1758 if r == next_rev[0]:
1765 if r == next_rev[0]:
1759 # If the last rev we looked at was the one just previous,
1766 # If the last rev we looked at was the one just previous,
1760 # we only need to see a diff.
1767 # we only need to see a diff.
1761 deltamf = mnfst.readdelta(mnfstnode)
1768 deltamf = mnfst.readdelta(mnfstnode)
1762 # For each line in the delta
1769 # For each line in the delta
1763 for f, fnode in deltamf.iteritems():
1770 for f, fnode in deltamf.iteritems():
1764 f = changedfiles.get(f, None)
1771 f = changedfiles.get(f, None)
1765 # And if the file is in the list of files we care
1772 # And if the file is in the list of files we care
1766 # about.
1773 # about.
1767 if f is not None:
1774 if f is not None:
1768 # Get the changenode this manifest belongs to
1775 # Get the changenode this manifest belongs to
1769 clnode = msng_mnfst_set[mnfstnode]
1776 clnode = msng_mnfst_set[mnfstnode]
1770 # Create the set of filenodes for the file if
1777 # Create the set of filenodes for the file if
1771 # there isn't one already.
1778 # there isn't one already.
1772 ndset = msng_filenode_set.setdefault(f, {})
1779 ndset = msng_filenode_set.setdefault(f, {})
1773 # And set the filenode's changelog node to the
1780 # And set the filenode's changelog node to the
1774 # manifest's if it hasn't been set already.
1781 # manifest's if it hasn't been set already.
1775 ndset.setdefault(fnode, clnode)
1782 ndset.setdefault(fnode, clnode)
1776 else:
1783 else:
1777 # Otherwise we need a full manifest.
1784 # Otherwise we need a full manifest.
1778 m = mnfst.read(mnfstnode)
1785 m = mnfst.read(mnfstnode)
1779 # For every file we care about.
1786 # For every file we care about.
1780 for f in changedfiles:
1787 for f in changedfiles:
1781 fnode = m.get(f, None)
1788 fnode = m.get(f, None)
1782 # If it's in the manifest
1789 # If it's in the manifest
1783 if fnode is not None:
1790 if fnode is not None:
1784 # See comments above.
1791 # See comments above.
1785 clnode = msng_mnfst_set[mnfstnode]
1792 clnode = msng_mnfst_set[mnfstnode]
1786 ndset = msng_filenode_set.setdefault(f, {})
1793 ndset = msng_filenode_set.setdefault(f, {})
1787 ndset.setdefault(fnode, clnode)
1794 ndset.setdefault(fnode, clnode)
1788 # Remember the revision we hope to see next.
1795 # Remember the revision we hope to see next.
1789 next_rev[0] = r + 1
1796 next_rev[0] = r + 1
1790 return collect_msng_filenodes
1797 return collect_msng_filenodes
1791
1798
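The fast path above hinges on revlog access order: when manifest revisions are visited consecutively, readdelta() yields only the entries that changed since the previous revision, so a full manifest parse is avoided. The dispatch in isolation (manifest_entries, read_full and read_delta are toy stand-ins for this pattern, not the real mnfst API):

def manifest_entries(rev, prev_rev, read_full, read_delta):
    # both callables return {filename: filenode} mappings
    if rev == prev_rev + 1:
        # consecutive revision: the delta lists exactly the changed files
        return read_delta(rev)
    # out-of-order access: fall back to parsing the whole manifest
    return read_full(rev)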
1792 # We have a list of filenodes we think we need for a file; let's remove
1799 # We have a list of filenodes we think we need for a file; let's remove
1793 # all those we know the recipient must have.
1800 # all those we know the recipient must have.
1794 def prune_filenodes(f, filerevlog):
1801 def prune_filenodes(f, filerevlog):
1795 msngset = msng_filenode_set[f]
1802 msngset = msng_filenode_set[f]
1796 hasset = {}
1803 hasset = {}
1797 # If a 'missing' filenode thinks it belongs to a changenode we
1804 # If a 'missing' filenode thinks it belongs to a changenode we
1798 # assume the recipient must have, then the recipient must have
1805 # assume the recipient must have, then the recipient must have
1799 # that filenode.
1806 # that filenode.
1800 for n in msngset:
1807 for n in msngset:
1801 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1808 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1802 if clnode in has_cl_set:
1809 if clnode in has_cl_set:
1803 hasset[n] = 1
1810 hasset[n] = 1
1804 prune_parents(filerevlog, hasset, msngset)
1811 prune_parents(filerevlog, hasset, msngset)
1805
1812
1806 # A function generator function that sets up a context for the
1813 # A function generator function that sets up a context for the
1807 # inner function.
1814 # inner function.
1808 def lookup_filenode_link_func(fname):
1815 def lookup_filenode_link_func(fname):
1809 msngset = msng_filenode_set[fname]
1816 msngset = msng_filenode_set[fname]
1810 # Lookup the changenode the filenode belongs to.
1817 # Lookup the changenode the filenode belongs to.
1811 def lookup_filenode_link(fnode):
1818 def lookup_filenode_link(fnode):
1812 return msngset[fnode]
1819 return msngset[fnode]
1813 return lookup_filenode_link
1820 return lookup_filenode_link
1814
1821
1815 # Add the nodes that were explicitly requested.
1822 # Add the nodes that were explicitly requested.
1816 def add_extra_nodes(name, nodes):
1823 def add_extra_nodes(name, nodes):
1817 if not extranodes or name not in extranodes:
1824 if not extranodes or name not in extranodes:
1818 return
1825 return
1819
1826
1820 for node, linknode in extranodes[name]:
1827 for node, linknode in extranodes[name]:
1821 if node not in nodes:
1828 if node not in nodes:
1822 nodes[node] = linknode
1829 nodes[node] = linknode
1823
1830
1824 # Now that we have all these utility functions to help out and
1831 # Now that we have all these utility functions to help out and
1825 # logically divide up the task, generate the group.
1832 # logically divide up the task, generate the group.
1826 def gengroup():
1833 def gengroup():
1827 # The set of changed files starts empty.
1834 # The set of changed files starts empty.
1828 changedfiles = {}
1835 changedfiles = {}
1829 # Create a changenode group generator that will call our functions
1836 # Create a changenode group generator that will call our functions
1830 # back to lookup the owning changenode and collect information.
1837 # back to lookup the owning changenode and collect information.
1831 group = cl.group(msng_cl_lst, identity,
1838 group = cl.group(msng_cl_lst, identity,
1832 manifest_and_file_collector(changedfiles))
1839 manifest_and_file_collector(changedfiles))
1833 for chnk in group:
1840 for chnk in group:
1834 yield chnk
1841 yield chnk
1835
1842
1836 # The list of manifests has been collected by the generator
1843 # The list of manifests has been collected by the generator
1837 # calling our functions back.
1844 # calling our functions back.
1838 prune_manifests()
1845 prune_manifests()
1839 add_extra_nodes(1, msng_mnfst_set)
1846 add_extra_nodes(1, msng_mnfst_set)
1840 msng_mnfst_lst = msng_mnfst_set.keys()
1847 msng_mnfst_lst = msng_mnfst_set.keys()
1841 # Sort the manifestnodes by revision number.
1848 # Sort the manifestnodes by revision number.
1842 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1849 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1843 # Create a generator for the manifestnodes that calls our lookup
1850 # Create a generator for the manifestnodes that calls our lookup
1844 # and data collection functions back.
1851 # and data collection functions back.
1845 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1852 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1846 filenode_collector(changedfiles))
1853 filenode_collector(changedfiles))
1847 for chnk in group:
1854 for chnk in group:
1848 yield chnk
1855 yield chnk
1849
1856
1850 # These are no longer needed, dereference and toss the memory for
1857 # These are no longer needed, dereference and toss the memory for
1851 # them.
1858 # them.
1852 msng_mnfst_lst = None
1859 msng_mnfst_lst = None
1853 msng_mnfst_set.clear()
1860 msng_mnfst_set.clear()
1854
1861
1855 if extranodes:
1862 if extranodes:
1856 for fname in extranodes:
1863 for fname in extranodes:
1857 if isinstance(fname, int):
1864 if isinstance(fname, int):
1858 continue
1865 continue
1859 msng_filenode_set.setdefault(fname, {})
1866 msng_filenode_set.setdefault(fname, {})
1860 changedfiles[fname] = 1
1867 changedfiles[fname] = 1
1861 # Go through all our files in order sorted by name.
1868 # Go through all our files in order sorted by name.
1862 for fname in util.sort(changedfiles):
1869 for fname in util.sort(changedfiles):
1863 filerevlog = self.file(fname)
1870 filerevlog = self.file(fname)
1864 if not len(filerevlog):
1871 if not len(filerevlog):
1865 raise util.Abort(_("empty or missing revlog for %s") % fname)
1872 raise util.Abort(_("empty or missing revlog for %s") % fname)
1866 # Toss out the filenodes that the recipient isn't really
1873 # Toss out the filenodes that the recipient isn't really
1867 # missing.
1874 # missing.
1868 if fname in msng_filenode_set:
1875 if fname in msng_filenode_set:
1869 prune_filenodes(fname, filerevlog)
1876 prune_filenodes(fname, filerevlog)
1870 add_extra_nodes(fname, msng_filenode_set[fname])
1877 add_extra_nodes(fname, msng_filenode_set[fname])
1871 msng_filenode_lst = msng_filenode_set[fname].keys()
1878 msng_filenode_lst = msng_filenode_set[fname].keys()
1872 else:
1879 else:
1873 msng_filenode_lst = []
1880 msng_filenode_lst = []
1874 # If any filenodes are left, generate the group for them,
1881 # If any filenodes are left, generate the group for them,
1875 # otherwise don't bother.
1882 # otherwise don't bother.
1876 if len(msng_filenode_lst) > 0:
1883 if len(msng_filenode_lst) > 0:
1877 yield changegroup.chunkheader(len(fname))
1884 yield changegroup.chunkheader(len(fname))
1878 yield fname
1885 yield fname
1879 # Sort the filenodes by their revision #
1886 # Sort the filenodes by their revision #
1880 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1887 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1881 # Create a group generator and only pass in a changenode
1888 # Create a group generator and only pass in a changenode
1882 # lookup function as we need to collect no information
1889 # lookup function as we need to collect no information
1883 # from filenodes.
1890 # from filenodes.
1884 group = filerevlog.group(msng_filenode_lst,
1891 group = filerevlog.group(msng_filenode_lst,
1885 lookup_filenode_link_func(fname))
1892 lookup_filenode_link_func(fname))
1886 for chnk in group:
1893 for chnk in group:
1887 yield chnk
1894 yield chnk
1888 if fname in msng_filenode_set:
1895 if fname in msng_filenode_set:
1889 # Don't need this anymore, toss it to free memory.
1896 # Don't need this anymore, toss it to free memory.
1890 del msng_filenode_set[fname]
1897 del msng_filenode_set[fname]
1891 # Signal that no more groups are left.
1898 # Signal that no more groups are left.
1892 yield changegroup.closechunk()
1899 yield changegroup.closechunk()
1893
1900
1894 if msng_cl_lst:
1901 if msng_cl_lst:
1895 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1902 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1896
1903
1897 return util.chunkbuffer(gengroup())
1904 return util.chunkbuffer(gengroup())
1898
1905
1899 def changegroup(self, basenodes, source):
1906 def changegroup(self, basenodes, source):
1900 # to avoid a race we use changegroupsubset() (issue1320)
1907 # to avoid a race we use changegroupsubset() (issue1320)
1901 return self.changegroupsubset(basenodes, self.heads(), source)
1908 return self.changegroupsubset(basenodes, self.heads(), source)
1902
1909
1903 def _changegroup(self, common, source):
1910 def _changegroup(self, common, source):
1904 """Generate a changegroup of all nodes that we have that a recipient
1911 """Generate a changegroup of all nodes that we have that a recipient
1905 doesn't.
1912 doesn't.
1906
1913
1907 This is much easier than the previous function as we can assume that
1914 This is much easier than the previous function as we can assume that
1908 the recipient has any changenode we aren't sending them.
1915 the recipient has any changenode we aren't sending them.
1909
1916
1910 common is the set of common nodes between remote and self"""
1917 common is the set of common nodes between remote and self"""
1911
1918
1912 self.hook('preoutgoing', throw=True, source=source)
1919 self.hook('preoutgoing', throw=True, source=source)
1913
1920
1914 cl = self.changelog
1921 cl = self.changelog
1915 nodes = cl.findmissing(common)
1922 nodes = cl.findmissing(common)
1916 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1923 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1917 self.changegroupinfo(nodes, source)
1924 self.changegroupinfo(nodes, source)
1918
1925
1919 def identity(x):
1926 def identity(x):
1920 return x
1927 return x
1921
1928
1922 def gennodelst(log):
1929 def gennodelst(log):
1923 for r in log:
1930 for r in log:
1924 if log.linkrev(r) in revset:
1931 if log.linkrev(r) in revset:
1925 yield log.node(r)
1932 yield log.node(r)
1926
1933
1927 def changed_file_collector(changedfileset):
1934 def changed_file_collector(changedfileset):
1928 def collect_changed_files(clnode):
1935 def collect_changed_files(clnode):
1929 c = cl.read(clnode)
1936 c = cl.read(clnode)
1930 for fname in c[3]:
1937 for fname in c[3]:
1931 changedfileset[fname] = 1
1938 changedfileset[fname] = 1
1932 return collect_changed_files
1939 return collect_changed_files
1933
1940
1934 def lookuprevlink_func(revlog):
1941 def lookuprevlink_func(revlog):
1935 def lookuprevlink(n):
1942 def lookuprevlink(n):
1936 return cl.node(revlog.linkrev(revlog.rev(n)))
1943 return cl.node(revlog.linkrev(revlog.rev(n)))
1937 return lookuprevlink
1944 return lookuprevlink
1938
1945
1939 def gengroup():
1946 def gengroup():
1940 # construct a list of all changed files
1947 # construct a list of all changed files
1941 changedfiles = {}
1948 changedfiles = {}
1942
1949
1943 for chnk in cl.group(nodes, identity,
1950 for chnk in cl.group(nodes, identity,
1944 changed_file_collector(changedfiles)):
1951 changed_file_collector(changedfiles)):
1945 yield chnk
1952 yield chnk
1946
1953
1947 mnfst = self.manifest
1954 mnfst = self.manifest
1948 nodeiter = gennodelst(mnfst)
1955 nodeiter = gennodelst(mnfst)
1949 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1956 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1950 yield chnk
1957 yield chnk
1951
1958
1952 for fname in util.sort(changedfiles):
1959 for fname in util.sort(changedfiles):
1953 filerevlog = self.file(fname)
1960 filerevlog = self.file(fname)
1954 if not len(filerevlog):
1961 if not len(filerevlog):
1955 raise util.Abort(_("empty or missing revlog for %s") % fname)
1962 raise util.Abort(_("empty or missing revlog for %s") % fname)
1956 nodeiter = gennodelst(filerevlog)
1963 nodeiter = gennodelst(filerevlog)
1957 nodeiter = list(nodeiter)
1964 nodeiter = list(nodeiter)
1958 if nodeiter:
1965 if nodeiter:
1959 yield changegroup.chunkheader(len(fname))
1966 yield changegroup.chunkheader(len(fname))
1960 yield fname
1967 yield fname
1961 lookup = lookuprevlink_func(filerevlog)
1968 lookup = lookuprevlink_func(filerevlog)
1962 for chnk in filerevlog.group(nodeiter, lookup):
1969 for chnk in filerevlog.group(nodeiter, lookup):
1963 yield chnk
1970 yield chnk
1964
1971
1965 yield changegroup.closechunk()
1972 yield changegroup.closechunk()
1966
1973
1967 if nodes:
1974 if nodes:
1968 self.hook('outgoing', node=hex(nodes[0]), source=source)
1975 self.hook('outgoing', node=hex(nodes[0]), source=source)
1969
1976
1970 return util.chunkbuffer(gengroup())
1977 return util.chunkbuffer(gengroup())
1971
1978
1972 def addchangegroup(self, source, srctype, url, emptyok=False):
1979 def addchangegroup(self, source, srctype, url, emptyok=False):
1973 """add changegroup to repo.
1980 """add changegroup to repo.
1974
1981
1975 return values:
1982 return values:
1976 - nothing changed or no source: 0
1983 - nothing changed or no source: 0
1977 - more heads than before: 1+added heads (2..n)
1984 - more heads than before: 1+added heads (2..n)
1978 - fewer heads than before: -1-removed heads (-2..-n)
1985 - fewer heads than before: -1-removed heads (-2..-n)
1979 - number of heads stays the same: 1
1986 - number of heads stays the same: 1
1980 """
1987 """
1981 def csmap(x):
1988 def csmap(x):
1982 self.ui.debug(_("add changeset %s\n") % short(x))
1989 self.ui.debug(_("add changeset %s\n") % short(x))
1983 return len(cl)
1990 return len(cl)
1984
1991
1985 def revmap(x):
1992 def revmap(x):
1986 return cl.rev(x)
1993 return cl.rev(x)
1987
1994
1988 if not source:
1995 if not source:
1989 return 0
1996 return 0
1990
1997
1991 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1998 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1992
1999
1993 changesets = files = revisions = 0
2000 changesets = files = revisions = 0
1994
2001
1995 # write changelog data to temp files so concurrent readers will not see
2002 # write changelog data to temp files so concurrent readers will not see
1996 # an inconsistent view
2003 # an inconsistent view
1997 cl = self.changelog
2004 cl = self.changelog
1998 cl.delayupdate()
2005 cl.delayupdate()
1999 oldheads = len(cl.heads())
2006 oldheads = len(cl.heads())
2000
2007
2001 tr = self.transaction()
2008 tr = self.transaction()
2002 try:
2009 try:
2003 trp = weakref.proxy(tr)
2010 trp = weakref.proxy(tr)
2004 # pull off the changeset group
2011 # pull off the changeset group
2005 self.ui.status(_("adding changesets\n"))
2012 self.ui.status(_("adding changesets\n"))
2006 cor = len(cl) - 1
2013 cor = len(cl) - 1
2007 chunkiter = changegroup.chunkiter(source)
2014 chunkiter = changegroup.chunkiter(source)
2008 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2015 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2009 raise util.Abort(_("received changelog group is empty"))
2016 raise util.Abort(_("received changelog group is empty"))
2010 cnr = len(cl) - 1
2017 cnr = len(cl) - 1
2011 changesets = cnr - cor
2018 changesets = cnr - cor
2012
2019
2013 # pull off the manifest group
2020 # pull off the manifest group
2014 self.ui.status(_("adding manifests\n"))
2021 self.ui.status(_("adding manifests\n"))
2015 chunkiter = changegroup.chunkiter(source)
2022 chunkiter = changegroup.chunkiter(source)
2016 # no need to check for empty manifest group here:
2023 # no need to check for empty manifest group here:
2017 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2024 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2018 # no new manifest will be created and the manifest group will
2025 # no new manifest will be created and the manifest group will
2019 # be empty during the pull
2026 # be empty during the pull
2020 self.manifest.addgroup(chunkiter, revmap, trp)
2027 self.manifest.addgroup(chunkiter, revmap, trp)
2021
2028
2022 # process the files
2029 # process the files
2023 self.ui.status(_("adding file changes\n"))
2030 self.ui.status(_("adding file changes\n"))
2024 while 1:
2031 while 1:
2025 f = changegroup.getchunk(source)
2032 f = changegroup.getchunk(source)
2026 if not f:
2033 if not f:
2027 break
2034 break
2028 self.ui.debug(_("adding %s revisions\n") % f)
2035 self.ui.debug(_("adding %s revisions\n") % f)
2029 fl = self.file(f)
2036 fl = self.file(f)
2030 o = len(fl)
2037 o = len(fl)
2031 chunkiter = changegroup.chunkiter(source)
2038 chunkiter = changegroup.chunkiter(source)
2032 if fl.addgroup(chunkiter, revmap, trp) is None:
2039 if fl.addgroup(chunkiter, revmap, trp) is None:
2033 raise util.Abort(_("received file revlog group is empty"))
2040 raise util.Abort(_("received file revlog group is empty"))
2034 revisions += len(fl) - o
2041 revisions += len(fl) - o
2035 files += 1
2042 files += 1
2036
2043
2037 # make changelog see real files again
2038 cl.finalize(trp)
2039
2040 newheads = len(self.changelog.heads())
2044 newheads = len(self.changelog.heads())
2041 heads = ""
2045 heads = ""
2042 if oldheads and newheads != oldheads:
2046 if oldheads and newheads != oldheads:
2043 heads = _(" (%+d heads)") % (newheads - oldheads)
2047 heads = _(" (%+d heads)") % (newheads - oldheads)
2044
2048
2045 self.ui.status(_("added %d changesets"
2049 self.ui.status(_("added %d changesets"
2046 " with %d changes to %d files%s\n")
2050 " with %d changes to %d files%s\n")
2047 % (changesets, revisions, files, heads))
2051 % (changesets, revisions, files, heads))
2048
2052
2049 if changesets > 0:
2053 if changesets > 0:
2054 p = lambda: self.changelog.writepending() and self.root or ""
2050 self.hook('pretxnchangegroup', throw=True,
2055 self.hook('pretxnchangegroup', throw=True,
2051 node=hex(self.changelog.node(cor+1)), source=srctype,
2056 node=hex(self.changelog.node(cor+1)), source=srctype,
2052 url=url)
2057 url=url, pending=p)
2058
2059 # make changelog see real files again
2060 cl.finalize(trp)
2053
2061
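This is the core of the HG_PENDING mechanism this changeset introduces: writepending() materializes the delayed changelog data as a pending copy on disk and, if it did, the lambda hands the repository root to the hook machinery, which exports it as HG_PENDING (as the printenv.py and test changes below confirm). A pretxn hook can then see the provisional changesets before finalize() makes them permanent. A sketch of an external shell hook doing so (assuming, per this change, that the reading side honors HG_PENDING when it names the repository root):

import os, sys
from mercurial import hg, ui as uimod

def main():
    root = os.environ.get('HG_PENDING')
    if not root:
        sys.exit(0)          # nothing pending, allow the transaction
    repo = hg.repository(uimod.ui(), root)   # sees the pending changelog
    first = repo[os.environ['HG_NODE']].rev()
    incoming = len(repo) - first
    sys.stderr.write('%d incoming changesets\n' % incoming)
    sys.exit(0)              # a non-zero exit would abort the transaction

if __name__ == '__main__':
    main()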
2054 tr.close()
2062 tr.close()
2055 finally:
2063 finally:
2056 del tr
2064 del tr
2057
2065
2058 if changesets > 0:
2066 if changesets > 0:
2059 # forcefully update the on-disk branch cache
2067 # forcefully update the on-disk branch cache
2060 self.ui.debug(_("updating the branch cache\n"))
2068 self.ui.debug(_("updating the branch cache\n"))
2061 self.branchtags()
2069 self.branchtags()
2062 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2070 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2063 source=srctype, url=url)
2071 source=srctype, url=url)
2064
2072
2065 for i in xrange(cor + 1, cnr + 1):
2073 for i in xrange(cor + 1, cnr + 1):
2066 self.hook("incoming", node=hex(self.changelog.node(i)),
2074 self.hook("incoming", node=hex(self.changelog.node(i)),
2067 source=srctype, url=url)
2075 source=srctype, url=url)
2068
2076
2069 # never return 0 here:
2077 # never return 0 here:
2070 if newheads < oldheads:
2078 if newheads < oldheads:
2071 return newheads - oldheads - 1
2079 return newheads - oldheads - 1
2072 else:
2080 else:
2073 return newheads - oldheads + 1
2081 return newheads - oldheads + 1
2074
2082
2075
2083
2076 def stream_in(self, remote):
2084 def stream_in(self, remote):
2077 fp = remote.stream_out()
2085 fp = remote.stream_out()
2078 l = fp.readline()
2086 l = fp.readline()
2079 try:
2087 try:
2080 resp = int(l)
2088 resp = int(l)
2081 except ValueError:
2089 except ValueError:
2082 raise error.ResponseError(
2090 raise error.ResponseError(
2083 _('Unexpected response from remote server:'), l)
2091 _('Unexpected response from remote server:'), l)
2084 if resp == 1:
2092 if resp == 1:
2085 raise util.Abort(_('operation forbidden by server'))
2093 raise util.Abort(_('operation forbidden by server'))
2086 elif resp == 2:
2094 elif resp == 2:
2087 raise util.Abort(_('locking the remote repository failed'))
2095 raise util.Abort(_('locking the remote repository failed'))
2088 elif resp != 0:
2096 elif resp != 0:
2089 raise util.Abort(_('the server sent an unknown error code'))
2097 raise util.Abort(_('the server sent an unknown error code'))
2090 self.ui.status(_('streaming all changes\n'))
2098 self.ui.status(_('streaming all changes\n'))
2091 l = fp.readline()
2099 l = fp.readline()
2092 try:
2100 try:
2093 total_files, total_bytes = map(int, l.split(' ', 1))
2101 total_files, total_bytes = map(int, l.split(' ', 1))
2094 except (ValueError, TypeError):
2102 except (ValueError, TypeError):
2095 raise error.ResponseError(
2103 raise error.ResponseError(
2096 _('Unexpected response from remote server:'), l)
2104 _('Unexpected response from remote server:'), l)
2097 self.ui.status(_('%d files to transfer, %s of data\n') %
2105 self.ui.status(_('%d files to transfer, %s of data\n') %
2098 (total_files, util.bytecount(total_bytes)))
2106 (total_files, util.bytecount(total_bytes)))
2099 start = time.time()
2107 start = time.time()
2100 for i in xrange(total_files):
2108 for i in xrange(total_files):
2101 # XXX doesn't support '\n' or '\r' in filenames
2109 # XXX doesn't support '\n' or '\r' in filenames
2102 l = fp.readline()
2110 l = fp.readline()
2103 try:
2111 try:
2104 name, size = l.split('\0', 1)
2112 name, size = l.split('\0', 1)
2105 size = int(size)
2113 size = int(size)
2106 except (ValueError, TypeError):
2114 except (ValueError, TypeError):
2107 raise error.ResponseError(
2115 raise error.ResponseError(
2108 _('Unexpected response from remote server:'), l)
2116 _('Unexpected response from remote server:'), l)
2109 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2117 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2110 ofp = self.sopener(name, 'w')
2118 ofp = self.sopener(name, 'w')
2111 for chunk in util.filechunkiter(fp, limit=size):
2119 for chunk in util.filechunkiter(fp, limit=size):
2112 ofp.write(chunk)
2120 ofp.write(chunk)
2113 ofp.close()
2121 ofp.close()
2114 elapsed = time.time() - start
2122 elapsed = time.time() - start
2115 if elapsed <= 0:
2123 if elapsed <= 0:
2116 elapsed = 0.001
2124 elapsed = 0.001
2117 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2125 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2118 (util.bytecount(total_bytes), elapsed,
2126 (util.bytecount(total_bytes), elapsed,
2119 util.bytecount(total_bytes / elapsed)))
2127 util.bytecount(total_bytes / elapsed)))
2120 self.invalidate()
2128 self.invalidate()
2121 return len(self.heads()) + 1
2129 return len(self.heads()) + 1
2122
2130
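stream_in() above parses a simple wire format: a status-code line, a line with '<total_files> <total_bytes>', then for each file a '<name>\0<size>' header line followed by exactly size raw bytes. A toy encoder of that layout (encode_stream is illustrative, not the real stream_out implementation):

def encode_stream(entries):
    # entries: list of (name, data) pairs; names must not contain
    # '\n' or '\r' (the same limitation stream_in notes above)
    total_bytes = sum(len(data) for _, data in entries)
    yield b'0\n'                                   # response code 0: OK
    yield ('%d %d\n' % (len(entries), total_bytes)).encode()
    for name, data in entries:
        yield ('%s\0%d\n' % (name, len(data))).encode()
        yield data

# b''.join(encode_stream([('data/foo.i', b'\x00' * 64)])) round-trips
# through the parsing loop above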
2123 def clone(self, remote, heads=[], stream=False):
2131 def clone(self, remote, heads=[], stream=False):
2124 '''clone remote repository.
2132 '''clone remote repository.
2125
2133
2126 keyword arguments:
2134 keyword arguments:
2127 heads: list of revs to clone (forces use of pull)
2135 heads: list of revs to clone (forces use of pull)
2128 stream: use streaming clone if possible'''
2136 stream: use streaming clone if possible'''
2129
2137
2130 # now, all clients that can request uncompressed clones can
2138 # now, all clients that can request uncompressed clones can
2131 # read repo formats supported by all servers that can serve
2139 # read repo formats supported by all servers that can serve
2132 # them.
2140 # them.
2133
2141
2134 # if revlog format changes, client will have to check version
2142 # if revlog format changes, client will have to check version
2135 # and format flags on "stream" capability, and use
2143 # and format flags on "stream" capability, and use
2136 # uncompressed only if compatible.
2144 # uncompressed only if compatible.
2137
2145
2138 if stream and not heads and remote.capable('stream'):
2146 if stream and not heads and remote.capable('stream'):
2139 return self.stream_in(remote)
2147 return self.stream_in(remote)
2140 return self.pull(remote, heads)
2148 return self.pull(remote, heads)
2141
2149
2142 # used to avoid circular references so destructors work
2150 # used to avoid circular references so destructors work
2143 def aftertrans(files):
2151 def aftertrans(files):
2144 renamefiles = [tuple(t) for t in files]
2152 renamefiles = [tuple(t) for t in files]
2145 def a():
2153 def a():
2146 for src, dest in renamefiles:
2154 for src, dest in renamefiles:
2147 util.rename(src, dest)
2155 util.rename(src, dest)
2148 return a
2156 return a
2149
2157
2150 def instance(ui, path, create):
2158 def instance(ui, path, create):
2151 return localrepository(ui, util.drop_scheme('file', path), create)
2159 return localrepository(ui, util.drop_scheme('file', path), create)
2152
2160
2153 def islocal(path):
2161 def islocal(path):
2154 return True
2162 return True
@@ -1,55 +1,58
1 # simple script to be used in hooks
1 # simple script to be used in hooks
2 # copy it to the current directory when the test starts:
2 # copy it to the current directory when the test starts:
3 #
3 #
4 # cp "$TESTDIR"/printenv.py .
4 # cp "$TESTDIR"/printenv.py .
5 #
5 #
6 # put something like this in the repo .hg/hgrc:
6 # put something like this in the repo .hg/hgrc:
7 #
7 #
8 # [hooks]
8 # [hooks]
9 # changegroup = python ../printenv.py <hookname> [exit] [output]
9 # changegroup = python ../printenv.py <hookname> [exit] [output]
10 #
10 #
11 # - <hookname> is a mandatory argument (e.g. "changegroup")
11 # - <hookname> is a mandatory argument (e.g. "changegroup")
12 # - [exit] is the exit code of the hook (default: 0)
12 # - [exit] is the exit code of the hook (default: 0)
13 # - [output] is the name of the output file (default: use sys.stdout)
13 # - [output] is the name of the output file (default: use sys.stdout)
14 # the file will be opened in append mode.
14 # the file will be opened in append mode.
15 #
15 #
16 import os
16 import os
17 import sys
17 import sys
18
18
19 try:
19 try:
20 import msvcrt
20 import msvcrt
21 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
21 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
22 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
22 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
23 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
23 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
24 except ImportError:
24 except ImportError:
25 pass
25 pass
26
26
27 exitcode = 0
27 exitcode = 0
28 out = sys.stdout
28 out = sys.stdout
29
29
30 name = sys.argv[1]
30 name = sys.argv[1]
31 if len(sys.argv) > 2:
31 if len(sys.argv) > 2:
32 exitcode = int(sys.argv[2])
32 exitcode = int(sys.argv[2])
33 if len(sys.argv) > 3:
33 if len(sys.argv) > 3:
34 out = open(sys.argv[3], "ab")
34 out = open(sys.argv[3], "ab")
35
35
36 # variables with empty values may not exist on all platforms, filter
36 # variables with empty values may not exist on all platforms, filter
37 # them now for portability's sake.
37 # them now for portability's sake.
38 env = [k for k, v in os.environ.iteritems()
38 env = [k for k, v in os.environ.iteritems()
39 if k.startswith("HG_") and v]
39 if k.startswith("HG_") and v]
40 env.sort()
40 env.sort()
41
41
42 # normalize the variable part of HG_URL
42 # normalize the variable part of HG_URL
43 url = os.environ.get("HG_URL", "")
43 url = os.environ.get("HG_URL", "")
44 if url.startswith("file:"):
44 if url.startswith("file:"):
45 os.environ["HG_URL"] = "file:"
45 os.environ["HG_URL"] = "file:"
46 elif url.startswith("remote:http"):
46 elif url.startswith("remote:http"):
47 os.environ["HG_URL"] = "remote:http"
47 os.environ["HG_URL"] = "remote:http"
48
48
49 if "HG_PENDING" in os.environ:
50 os.environ["HG_PENDING"] = os.environ["HG_PENDING"] and "true"
51
49 out.write("%s hook: " % name)
52 out.write("%s hook: " % name)
50 for v in env:
53 for v in env:
51 out.write("%s=%s " % (v, os.environ[v]))
54 out.write("%s=%s " % (v, os.environ[v]))
52 out.write("\n")
55 out.write("\n")
53 out.close()
56 out.close()
54
57
55 sys.exit(exitcode)
58 sys.exit(exitcode)
@@ -1,154 +1,154
1 precommit hook: HG_PARENT1=0000000000000000000000000000000000000000
1 precommit hook: HG_PARENT1=0000000000000000000000000000000000000000
2 pretxncommit hook: HG_NODE=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b HG_PARENT1=0000000000000000000000000000000000000000
2 pretxncommit hook: HG_NODE=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b HG_PARENT1=0000000000000000000000000000000000000000 HG_PENDING=true
3 0:29b62aeb769f
3 0:29b62aeb769f
4 commit hook: HG_NODE=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b HG_PARENT1=0000000000000000000000000000000000000000
4 commit hook: HG_NODE=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b HG_PARENT1=0000000000000000000000000000000000000000
5 commit.b hook: HG_NODE=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b HG_PARENT1=0000000000000000000000000000000000000000
5 commit.b hook: HG_NODE=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b HG_PARENT1=0000000000000000000000000000000000000000
6 updating working directory
6 updating working directory
7 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
7 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
8 precommit hook: HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
8 precommit hook: HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
9 pretxncommit hook: HG_NODE=b702efe9688826e3a91283852b328b84dbf37bc2 HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
9 pretxncommit hook: HG_NODE=b702efe9688826e3a91283852b328b84dbf37bc2 HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b HG_PENDING=true
10 1:b702efe96888
10 1:b702efe96888
11 commit hook: HG_NODE=b702efe9688826e3a91283852b328b84dbf37bc2 HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
11 commit hook: HG_NODE=b702efe9688826e3a91283852b328b84dbf37bc2 HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
12 commit.b hook: HG_NODE=b702efe9688826e3a91283852b328b84dbf37bc2 HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
12 commit.b hook: HG_NODE=b702efe9688826e3a91283852b328b84dbf37bc2 HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
13 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
13 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
14 precommit hook: HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
14 precommit hook: HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
15 pretxncommit hook: HG_NODE=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
15 pretxncommit hook: HG_NODE=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b HG_PENDING=true
16 2:1324a5531bac
16 2:1324a5531bac
17 commit hook: HG_NODE=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
17 commit hook: HG_NODE=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
18 commit.b hook: HG_NODE=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
18 commit.b hook: HG_NODE=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
19 created new head
19 created new head
20 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
20 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
21 (branch merge, don't forget to commit)
21 (branch merge, don't forget to commit)
22 precommit hook: HG_PARENT1=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT2=b702efe9688826e3a91283852b328b84dbf37bc2
22 precommit hook: HG_PARENT1=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT2=b702efe9688826e3a91283852b328b84dbf37bc2
23 pretxncommit hook: HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_PARENT1=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT2=b702efe9688826e3a91283852b328b84dbf37bc2
23 pretxncommit hook: HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_PARENT1=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT2=b702efe9688826e3a91283852b328b84dbf37bc2 HG_PENDING=true
24 3:4c52fb2e4022
24 3:4c52fb2e4022
25 commit hook: HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_PARENT1=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT2=b702efe9688826e3a91283852b328b84dbf37bc2
25 commit hook: HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_PARENT1=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT2=b702efe9688826e3a91283852b328b84dbf37bc2
26 commit.b hook: HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_PARENT1=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT2=b702efe9688826e3a91283852b328b84dbf37bc2
26 commit.b hook: HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_PARENT1=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT2=b702efe9688826e3a91283852b328b84dbf37bc2
27 pre-identify hook: HG_ARGS=id
27 pre-identify hook: HG_ARGS=id
28 warning: pre-identify hook exited with status 1
28 warning: pre-identify hook exited with status 1
29 pre-cat hook: HG_ARGS=cat b
29 pre-cat hook: HG_ARGS=cat b
30 post-cat hook: HG_ARGS=cat b HG_RESULT=0
30 post-cat hook: HG_ARGS=cat b HG_RESULT=0
31 b
31 b
32 prechangegroup hook: HG_SOURCE=pull HG_URL=file:
32 prechangegroup hook: HG_SOURCE=pull HG_URL=file:
33 changegroup hook: HG_NODE=b702efe9688826e3a91283852b328b84dbf37bc2 HG_SOURCE=pull HG_URL=file:
33 changegroup hook: HG_NODE=b702efe9688826e3a91283852b328b84dbf37bc2 HG_SOURCE=pull HG_URL=file:
34 incoming hook: HG_NODE=b702efe9688826e3a91283852b328b84dbf37bc2 HG_SOURCE=pull HG_URL=file:
34 incoming hook: HG_NODE=b702efe9688826e3a91283852b328b84dbf37bc2 HG_SOURCE=pull HG_URL=file:
35 incoming hook: HG_NODE=1324a5531bac09b329c3845d35ae6a7526874edb HG_SOURCE=pull HG_URL=file:
35 incoming hook: HG_NODE=1324a5531bac09b329c3845d35ae6a7526874edb HG_SOURCE=pull HG_URL=file:
36 incoming hook: HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_SOURCE=pull HG_URL=file:
36 incoming hook: HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_SOURCE=pull HG_URL=file:
37 pulling from ../a
37 pulling from ../a
38 searching for changes
38 searching for changes
39 adding changesets
39 adding changesets
40 adding manifests
40 adding manifests
41 adding file changes
41 adding file changes
42 added 3 changesets with 2 changes to 2 files
42 added 3 changesets with 2 changes to 2 files
43 (run 'hg update' to get a working copy)
43 (run 'hg update' to get a working copy)
44 pretag hook: HG_LOCAL=0 HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_TAG=a
44 pretag hook: HG_LOCAL=0 HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_TAG=a
45 precommit hook: HG_PARENT1=4c52fb2e402287dd5dc052090682536c8406c321
45 precommit hook: HG_PARENT1=4c52fb2e402287dd5dc052090682536c8406c321
46 pretxncommit hook: HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_PARENT1=4c52fb2e402287dd5dc052090682536c8406c321
46 pretxncommit hook: HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_PARENT1=4c52fb2e402287dd5dc052090682536c8406c321 HG_PENDING=true
47 4:8ea2ef7ad3e8
47 4:8ea2ef7ad3e8
48 commit hook: HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_PARENT1=4c52fb2e402287dd5dc052090682536c8406c321
48 commit hook: HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_PARENT1=4c52fb2e402287dd5dc052090682536c8406c321
49 commit.b hook: HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_PARENT1=4c52fb2e402287dd5dc052090682536c8406c321
49 commit.b hook: HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_PARENT1=4c52fb2e402287dd5dc052090682536c8406c321
50 tag hook: HG_LOCAL=0 HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_TAG=a
50 tag hook: HG_LOCAL=0 HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_TAG=a
51 pretag hook: HG_LOCAL=1 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=la
51 pretag hook: HG_LOCAL=1 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=la
52 tag hook: HG_LOCAL=1 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=la
52 tag hook: HG_LOCAL=1 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=la
53 pretag hook: HG_LOCAL=0 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=fa
53 pretag hook: HG_LOCAL=0 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=fa
54 pretag.forbid hook: HG_LOCAL=0 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=fa
54 pretag.forbid hook: HG_LOCAL=0 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=fa
55 abort: pretag.forbid hook exited with status 1
55 abort: pretag.forbid hook exited with status 1
56 pretag hook: HG_LOCAL=1 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=fla
56 pretag hook: HG_LOCAL=1 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=fla
57 pretag.forbid hook: HG_LOCAL=1 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=fla
57 pretag.forbid hook: HG_LOCAL=1 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=fla
58 abort: pretag.forbid hook exited with status 1
58 abort: pretag.forbid hook exited with status 1
59 4:8ea2ef7ad3e8
59 4:8ea2ef7ad3e8
60 precommit hook: HG_PARENT1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198
60 precommit hook: HG_PARENT1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198
61 pretxncommit hook: HG_NODE=fad284daf8c032148abaffcd745dafeceefceb61 HG_PARENT1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198
61 pretxncommit hook: HG_NODE=fad284daf8c032148abaffcd745dafeceefceb61 HG_PARENT1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_PENDING=true
62 5:fad284daf8c0
62 5:fad284daf8c0
63 pretxncommit.forbid hook: HG_NODE=fad284daf8c032148abaffcd745dafeceefceb61 HG_PARENT1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198
63 pretxncommit.forbid hook: HG_NODE=fad284daf8c032148abaffcd745dafeceefceb61 HG_PARENT1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_PENDING=true
64 transaction abort!
64 transaction abort!
65 rollback completed
65 rollback completed
66 abort: pretxncommit.forbid1 hook exited with status 1
66 abort: pretxncommit.forbid1 hook exited with status 1
67 4:8ea2ef7ad3e8
67 4:8ea2ef7ad3e8
68 precommit hook: HG_PARENT1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198
68 precommit hook: HG_PARENT1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198
69 precommit.forbid hook: HG_PARENT1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198
69 precommit.forbid hook: HG_PARENT1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198
70 abort: precommit.forbid hook exited with status 1
70 abort: precommit.forbid hook exited with status 1
71 4:8ea2ef7ad3e8
71 4:8ea2ef7ad3e8
72 preupdate hook: HG_PARENT1=b702efe96888
72 preupdate hook: HG_PARENT1=b702efe96888
73 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
73 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
74 preupdate hook: HG_PARENT1=8ea2ef7ad3e8
74 preupdate hook: HG_PARENT1=8ea2ef7ad3e8
75 update hook: HG_ERROR=0 HG_PARENT1=8ea2ef7ad3e8
75 update hook: HG_ERROR=0 HG_PARENT1=8ea2ef7ad3e8
76 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
76 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
77 3:4c52fb2e4022
77 3:4c52fb2e4022
78 prechangegroup.forbid hook: HG_SOURCE=pull HG_URL=file:
78 prechangegroup.forbid hook: HG_SOURCE=pull HG_URL=file:
79 pulling from ../a
79 pulling from ../a
80 searching for changes
80 searching for changes
81 abort: prechangegroup.forbid hook exited with status 1
81 abort: prechangegroup.forbid hook exited with status 1
82 4:8ea2ef7ad3e8
82 4:8ea2ef7ad3e8
83 pretxnchangegroup.forbid hook: HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_SOURCE=pull HG_URL=file:
83 pretxnchangegroup.forbid hook: HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_PENDING=true HG_SOURCE=pull HG_URL=file:
84 pulling from ../a
84 pulling from ../a
85 searching for changes
85 searching for changes
86 adding changesets
86 adding changesets
87 adding manifests
87 adding manifests
88 adding file changes
88 adding file changes
89 added 1 changesets with 1 changes to 1 files
89 added 1 changesets with 1 changes to 1 files
90 transaction abort!
90 transaction abort!
91 rollback completed
91 rollback completed
92 abort: pretxnchangegroup.forbid1 hook exited with status 1
92 abort: pretxnchangegroup.forbid1 hook exited with status 1
93 3:4c52fb2e4022
93 3:4c52fb2e4022
94 preoutgoing hook: HG_SOURCE=pull
94 preoutgoing hook: HG_SOURCE=pull
95 outgoing hook: HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_SOURCE=pull
95 outgoing hook: HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_SOURCE=pull
96 pulling from ../a
96 pulling from ../a
97 searching for changes
97 searching for changes
98 adding changesets
98 adding changesets
99 adding manifests
99 adding manifests
100 adding file changes
100 adding file changes
101 added 1 changesets with 1 changes to 1 files
101 added 1 changesets with 1 changes to 1 files
102 (run 'hg update' to get a working copy)
102 (run 'hg update' to get a working copy)
103 rolling back last transaction
103 rolling back last transaction
104 preoutgoing hook: HG_SOURCE=pull
104 preoutgoing hook: HG_SOURCE=pull
105 preoutgoing.forbid hook: HG_SOURCE=pull
105 preoutgoing.forbid hook: HG_SOURCE=pull
106 pulling from ../a
106 pulling from ../a
107 searching for changes
107 searching for changes
108 abort: preoutgoing.forbid hook exited with status 1
108 abort: preoutgoing.forbid hook exited with status 1
109 # test python hooks
109 # test python hooks
110 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
110 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
111 error: preoutgoing.raise hook raised an exception: exception from hook
111 error: preoutgoing.raise hook raised an exception: exception from hook
112 pulling from ../a
112 pulling from ../a
113 searching for changes
113 searching for changes
114 error: preoutgoing.abort hook failed: raise abort from hook
114 error: preoutgoing.abort hook failed: raise abort from hook
115 abort: raise abort from hook
115 abort: raise abort from hook
116 pulling from ../a
116 pulling from ../a
117 searching for changes
117 searching for changes
118 hook args:
118 hook args:
119 hooktype preoutgoing
119 hooktype preoutgoing
120 source pull
120 source pull
121 abort: preoutgoing.fail hook failed
121 abort: preoutgoing.fail hook failed
122 pulling from ../a
122 pulling from ../a
123 searching for changes
123 searching for changes
124 abort: preoutgoing.uncallable hook is invalid ("hooktests.uncallable" is not callable)
124 abort: preoutgoing.uncallable hook is invalid ("hooktests.uncallable" is not callable)
125 pulling from ../a
125 pulling from ../a
126 searching for changes
126 searching for changes
127 abort: preoutgoing.nohook hook is invalid ("hooktests.nohook" is not defined)
127 abort: preoutgoing.nohook hook is invalid ("hooktests.nohook" is not defined)
128 pulling from ../a
128 pulling from ../a
129 searching for changes
129 searching for changes
130 abort: preoutgoing.nomodule hook is invalid ("nomodule" not in a module)
130 abort: preoutgoing.nomodule hook is invalid ("nomodule" not in a module)
131 pulling from ../a
131 pulling from ../a
132 searching for changes
132 searching for changes
133 abort: preoutgoing.badmodule hook is invalid (import of "nomodule" failed)
133 abort: preoutgoing.badmodule hook is invalid (import of "nomodule" failed)
134 pulling from ../a
134 pulling from ../a
135 searching for changes
135 searching for changes
136 abort: preoutgoing.unreachable hook is invalid (import of "hooktests.container" failed)
136 abort: preoutgoing.unreachable hook is invalid (import of "hooktests.container" failed)
137 pulling from ../a
137 pulling from ../a
138 searching for changes
138 searching for changes
139 hook args:
139 hook args:
140 hooktype preoutgoing
140 hooktype preoutgoing
141 source pull
141 source pull
142 adding changesets
142 adding changesets
143 adding manifests
143 adding manifests
144 adding file changes
144 adding file changes
145 added 1 changesets with 1 changes to 1 files
145 added 1 changesets with 1 changes to 1 files
146 (run 'hg update' to get a working copy)
146 (run 'hg update' to get a working copy)
147 # make sure --traceback works
147 # make sure --traceback works
148 Traceback (most recent call last):
148 Traceback (most recent call last):
149 Automatically installed hook
149 Automatically installed hook
150 foo
150 foo
151 calling hook commit.auto: <function autohook>
151 calling hook commit.auto: <function autohook>
152 Automatically installed hook
152 Automatically installed hook
153 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
153 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
154 hooks.commit.auto=<function autohook>
154 hooks.commit.auto=<function autohook>